List of usage examples for org.apache.commons.cli OptionGroup.addOption
public OptionGroup addOption(Option option)
Adds the specified Option to this group.
From source file: org.apache.hadoop.hive.metastore.tools.SchemaToolCommandLine.java
@SuppressWarnings("static-access") private Options createOptions(OptionGroup additionalOptions) { Option help = new Option("help", "print this message"); Option infoOpt = new Option("info", "Show config and schema details"); Option upgradeOpt = new Option("upgradeSchema", "Schema upgrade"); Option upgradeFromOpt = OptionBuilder.withArgName("upgradeFrom").hasArg() .withDescription("Schema upgrade from a version").create("upgradeSchemaFrom"); Option initOpt = new Option("initSchema", "Schema initialization"); Option initToOpt = OptionBuilder.withArgName("initTo").hasArg() .withDescription("Schema initialization to a version").create("initSchemaTo"); Option initOrUpgradeSchemaOpt = new Option("initOrUpgradeSchema", "Initialize or upgrade schema to latest version"); Option validateOpt = new Option("validate", "Validate the database"); Option createCatalog = OptionBuilder.hasArg() .withDescription("Create a catalog, requires --catalogLocation parameter as well") .create("createCatalog"); Option alterCatalog = OptionBuilder.hasArg() .withDescription(/* w w w . j a va2 s . c o m*/ "Alter a catalog, requires --catalogLocation and/or --catalogDescription parameter as well") .create("alterCatalog"); Option moveDatabase = OptionBuilder.hasArg() .withDescription("Move a database between catalogs. Argument is the database name. " + "Requires --fromCatalog and --toCatalog parameters as well") .create("moveDatabase"); Option moveTable = OptionBuilder.hasArg() .withDescription("Move a table to a different database. Argument is the table name. 
" + "Requires --fromCatalog, --toCatalog, --fromDatabase, and --toDatabase " + " parameters as well.") .create("moveTable"); Option createUserOpt = new Option("createUser", "Create the Hive user, set hiveUser to the db" + " admin user and the hive password to the db admin password with this"); OptionGroup optGroup = new OptionGroup(); optGroup.addOption(help).addOption(infoOpt).addOption(upgradeOpt).addOption(upgradeFromOpt) .addOption(initOpt).addOption(initToOpt).addOption(initOrUpgradeSchemaOpt).addOption(validateOpt) .addOption(createCatalog).addOption(alterCatalog).addOption(moveDatabase).addOption(moveTable) .addOption(createUserOpt); optGroup.setRequired(true); Option userNameOpt = OptionBuilder.withArgName("user").hasArgs() .withDescription("Override config file user name").create("userName"); Option passwdOpt = OptionBuilder.withArgName("password").hasArgs() .withDescription("Override config file password").create("passWord"); Option dbTypeOpt = OptionBuilder.withArgName("databaseType").hasArgs() .withDescription("Metastore database type").isRequired().create("dbType"); Option hiveUserOpt = OptionBuilder.hasArg().withDescription("Hive user (for use with createUser)") .create("hiveUser"); Option hivePasswdOpt = OptionBuilder.hasArg().withDescription("Hive password (for use with createUser)") .create("hivePassword"); Option hiveDbOpt = OptionBuilder.hasArg().withDescription("Hive database (for use with createUser)") .create("hiveDb"); /* Option metaDbTypeOpt = OptionBuilder.withArgName("metaDatabaseType") .hasArgs().withDescription("Used only if upgrading the system catalog for hive") .create("metaDbType"); */ Option urlOpt = OptionBuilder.withArgName("url").hasArgs().withDescription("connection url to the database") .create("url"); Option driverOpt = OptionBuilder.withArgName("driver").hasArgs() .withDescription("driver name for connection").create("driver"); Option dbOpts = OptionBuilder.withArgName("databaseOpts").hasArgs() .withDescription("Backend DB 
specific options").create("dbOpts"); Option dryRunOpt = new Option("dryRun", "list SQL scripts (no execute)"); Option verboseOpt = new Option("verbose", "only print SQL statements"); Option serversOpt = OptionBuilder.withArgName("serverList").hasArgs() .withDescription("a comma-separated list of servers used in location validation in the format of " + "scheme://authority (e.g. hdfs://localhost:8000)") .create("servers"); Option catalogLocation = OptionBuilder.hasArg() .withDescription("Location of new catalog, required when adding a catalog") .create("catalogLocation"); Option catalogDescription = OptionBuilder.hasArg().withDescription("Description of new catalog") .create("catalogDescription"); Option ifNotExists = OptionBuilder .withDescription("If passed then it is not an error to create an existing catalog") .create("ifNotExists"); Option fromCatalog = OptionBuilder.hasArg() .withDescription("Catalog a moving database or table is coming from. This is " + "required if you are moving a database or table.") .create("fromCatalog"); Option toCatalog = OptionBuilder.hasArg() .withDescription("Catalog a moving database or table is going to. This is " + "required if you are moving a database or table.") .create("toCatalog"); Option fromDatabase = OptionBuilder.hasArg() .withDescription( "Database a moving table is coming from. This is " + "required if you are moving a table.") .create("fromDatabase"); Option toDatabase = OptionBuilder.hasArg() .withDescription( "Database a moving table is going to. 
This is " + "required if you are moving a table.") .create("toDatabase"); Options options = new Options(); options.addOption(help); options.addOptionGroup(optGroup); options.addOption(dbTypeOpt); //options.addOption(metaDbTypeOpt); options.addOption(userNameOpt); options.addOption(passwdOpt); options.addOption(urlOpt); options.addOption(driverOpt); options.addOption(dbOpts); options.addOption(dryRunOpt); options.addOption(verboseOpt); options.addOption(serversOpt); options.addOption(catalogLocation); options.addOption(catalogDescription); options.addOption(ifNotExists); options.addOption(fromCatalog); options.addOption(toCatalog); options.addOption(fromDatabase); options.addOption(toDatabase); options.addOption(hiveUserOpt); options.addOption(hivePasswdOpt); options.addOption(hiveDbOpt); if (additionalOptions != null) options.addOptionGroup(additionalOptions); return options; }
From source file:org.apache.helix.mock.participant.DummyProcess.java
@SuppressWarnings("static-access") synchronized private static Options constructCommandLineOptions() { Option helpOption = OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info") .create();/*from w ww .java 2 s .c om*/ Option clusterOption = OptionBuilder.withLongOpt(cluster).withDescription("Provide cluster name").create(); clusterOption.setArgs(1); clusterOption.setRequired(true); clusterOption.setArgName("Cluster name (Required)"); Option hostOption = OptionBuilder.withLongOpt(hostAddress).withDescription("Provide host name").create(); hostOption.setArgs(1); hostOption.setRequired(true); hostOption.setArgName("Host name (Required)"); Option portOption = OptionBuilder.withLongOpt(hostPort).withDescription("Provide host port").create(); portOption.setArgs(1); portOption.setRequired(true); portOption.setArgName("Host port (Required)"); Option cmTypeOption = OptionBuilder.withLongOpt(helixManagerType) .withDescription("Provide cluster manager type (e.g. 'zk', 'static-file', or 'dynamic-file'") .create(); cmTypeOption.setArgs(1); cmTypeOption.setRequired(true); cmTypeOption.setArgName("Clsuter manager type (e.g. 
'zk', 'static-file', or 'dynamic-file') (Required)"); Option zkServerOption = OptionBuilder.withLongOpt(zkServer).withDescription("Provide zookeeper address") .create(); zkServerOption.setArgs(1); zkServerOption.setRequired(true); zkServerOption.setArgName("ZookeeperServerAddress(Required for zk-based cluster manager)"); // Option rootNsOption = OptionBuilder.withLongOpt(rootNamespace) // .withDescription("Provide root namespace for dynamic-file based cluster manager").create(); // rootNsOption.setArgs(1); // rootNsOption.setRequired(true); // rootNsOption.setArgName("Root namespace (Required for dynamic-file based cluster manager)"); Option transDelayOption = OptionBuilder.withLongOpt(transDelay).withDescription("Provide state trans delay") .create(); transDelayOption.setArgs(1); transDelayOption.setRequired(false); transDelayOption.setArgName("Delay time in state transition, in MS"); OptionGroup optionGroup = new OptionGroup(); optionGroup.addOption(zkServerOption); Options options = new Options(); options.addOption(helpOption); options.addOption(clusterOption); options.addOption(hostOption); options.addOption(portOption); options.addOption(transDelayOption); options.addOption(cmTypeOption); options.addOptionGroup(optionGroup); return options; }
From source file:org.apache.helix.provisioning.tools.ContainerAdmin.java
@SuppressWarnings("static-access") public static void main(String[] args) throws Exception { Option zkServerOption = OptionBuilder.withLongOpt("zookeeperAddress") .withDescription("Provide zookeeper address").create(); zkServerOption.setArgs(1);//from w w w . ja v a 2 s .co m zkServerOption.setRequired(true); zkServerOption.setArgName("zookeeperAddress(Required)"); OptionGroup group = new OptionGroup(); group.setRequired(true); // update container count per service Option stopContainerOption = OptionBuilder.withLongOpt(stopContainer) .withDescription("appName participantName").create(); stopContainerOption.setArgs(2); stopContainerOption.setRequired(false); stopContainerOption.setArgName("appName participantName"); group.addOption(stopContainerOption); Options options = new Options(); options.addOption(zkServerOption); options.addOptionGroup(group); CommandLine cliParser = new GnuParser().parse(options, args); String zkAddress = cliParser.getOptionValue("zookeeperAddress"); ContainerAdmin admin = new ContainerAdmin(zkAddress); if (cliParser.hasOption(stopContainer)) { String appName = cliParser.getOptionValues(stopContainer)[0]; String participantName = cliParser.getOptionValues(stopContainer)[1]; admin.stopContainer(appName, participantName); } }
From source file:org.apache.helix.provisioning.tools.UpdateProvisionerConfig.java
@SuppressWarnings("static-access") public static void main(String[] args) throws ParseException { Option zkServerOption = OptionBuilder.withLongOpt("zookeeperAddress") .withDescription("Provide zookeeper address").create(); zkServerOption.setArgs(1);/*from w ww .ja v a 2s .c o m*/ zkServerOption.setRequired(true); zkServerOption.setArgName("zookeeperAddress(Required)"); OptionGroup group = new OptionGroup(); group.setRequired(true); // update container count per service Option updateContainerCountOption = OptionBuilder.withLongOpt(updateContainerCount) .withDescription("appName serviceName numContainers").create(); updateContainerCountOption.setArgs(3); updateContainerCountOption.setRequired(false); updateContainerCountOption.setArgName("appName serviceName numContainers"); group.addOption(updateContainerCountOption); Options options = new Options(); options.addOption(zkServerOption); options.addOptionGroup(group); CommandLine cliParser = new GnuParser().parse(options, args); String zkAddress = cliParser.getOptionValue("zookeeperAddress"); UpdateProvisionerConfig updater = new UpdateProvisionerConfig(zkAddress); if (cliParser.hasOption(updateContainerCount)) { String appName = cliParser.getOptionValues(updateContainerCount)[0]; String serviceName = cliParser.getOptionValues(updateContainerCount)[1]; int numContainers = Integer.parseInt(cliParser.getOptionValues(updateContainerCount)[2]); updater.setNumContainers(appName, serviceName, numContainers); } }
From source file:org.apache.helix.task.TaskDriver.java
/** Constructs the option group containing options required by all drivable jobs. */
@SuppressWarnings("static-access")
private static OptionGroup contructGenericRequiredOptionGroup() {
  // NOTE(review): the method name misspells "construct"; kept as-is so existing callers compile.
  Option zkAddressOption = OptionBuilder.isRequired().withLongOpt(ZK_ADDRESS)
      .withDescription("ZK address managing cluster").create();
  zkAddressOption.setArgs(1);
  zkAddressOption.setArgName("zkAddress");

  Option clusterNameOption = OptionBuilder.isRequired().withLongOpt(CLUSTER_NAME_OPTION)
      .withDescription("Cluster name").create();
  clusterNameOption.setArgs(1);
  clusterNameOption.setArgName("clusterName");

  Option taskResourceOption = OptionBuilder.isRequired().withLongOpt(RESOURCE_OPTION)
      .withDescription("Workflow or job name").create();
  taskResourceOption.setArgs(1);
  taskResourceOption.setArgName("resourceName");

  OptionGroup requiredOptions = new OptionGroup();
  requiredOptions.addOption(zkAddressOption);
  requiredOptions.addOption(clusterNameOption);
  requiredOptions.addOption(taskResourceOption);
  return requiredOptions;
}
From source file:org.apache.helix.task.TaskDriver.java
/** Constructs option group containing options required by all drivable jobs */ private static OptionGroup constructStartOptionGroup() { @SuppressWarnings("static-access") Option workflowFileOption = OptionBuilder.withLongOpt(WORKFLOW_FILE_OPTION) .withDescription("Local file describing workflow").create(); workflowFileOption.setArgs(1);//from w w w.j a v a 2 s . c o m workflowFileOption.setArgName("workflowFile"); OptionGroup group = new OptionGroup(); group.addOption(workflowFileOption); return group; }
From source file:org.apache.helix.tools.ClusterSetup.java
@SuppressWarnings("static-access") private static Options constructCommandLineOptions() { Option helpOption = OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info") .create();//from www.ja v a2 s. c o m Option zkServerOption = OptionBuilder.withLongOpt(zkServerAddress) .withDescription("Provide zookeeper address").create(); zkServerOption.setArgs(1); zkServerOption.setRequired(true); zkServerOption.setArgName("ZookeeperServerAddress(Required)"); Option listClustersOption = OptionBuilder.withLongOpt(listClusters) .withDescription("List existing clusters").create(); listClustersOption.setArgs(0); listClustersOption.setRequired(false); Option listResourceOption = OptionBuilder.withLongOpt(listResources) .withDescription("List resources hosted in a cluster").create(); listResourceOption.setArgs(1); listResourceOption.setRequired(false); listResourceOption.setArgName("clusterName"); Option listInstancesOption = OptionBuilder.withLongOpt(listInstances) .withDescription("List Instances in a cluster").create(); listInstancesOption.setArgs(1); listInstancesOption.setRequired(false); listInstancesOption.setArgName("clusterName"); Option addClusterOption = OptionBuilder.withLongOpt(addCluster).withDescription("Add a new cluster") .create(); addClusterOption.setArgs(1); addClusterOption.setRequired(false); addClusterOption.setArgName("clusterName"); Option activateClusterOption = OptionBuilder.withLongOpt(activateCluster) .withDescription("Enable/disable a cluster in distributed controller mode").create(); activateClusterOption.setArgs(3); activateClusterOption.setRequired(false); activateClusterOption.setArgName("clusterName grandCluster true/false"); Option deleteClusterOption = OptionBuilder.withLongOpt(dropCluster).withDescription("Delete a cluster") .create(); deleteClusterOption.setArgs(1); deleteClusterOption.setRequired(false); deleteClusterOption.setArgName("clusterName"); Option addInstanceOption = OptionBuilder.withLongOpt(addInstance) 
.withDescription("Add a new Instance to a cluster").create(); addInstanceOption.setArgs(2); addInstanceOption.setRequired(false); addInstanceOption.setArgName("clusterName InstanceId"); Option addResourceOption = OptionBuilder.withLongOpt(addResource) .withDescription("Add a resource to a cluster").create(); addResourceOption.setArgs(4); addResourceOption.setRequired(false); addResourceOption.setArgName("clusterName resourceName partitionNum stateModelRef <-mode modeValue>"); Option expandResourceOption = OptionBuilder.withLongOpt(expandResource) .withDescription("Expand resource to additional nodes").create(); expandResourceOption.setArgs(2); expandResourceOption.setRequired(false); expandResourceOption.setArgName("clusterName resourceName"); Option expandClusterOption = OptionBuilder.withLongOpt(expandCluster) .withDescription("Expand a cluster and all the resources").create(); expandClusterOption.setArgs(1); expandClusterOption.setRequired(false); expandClusterOption.setArgName("clusterName"); Option resourceModeOption = OptionBuilder.withLongOpt(mode) .withDescription("Specify resource mode, used with addResourceGroup command").create(); resourceModeOption.setArgs(1); resourceModeOption.setRequired(false); resourceModeOption.setArgName("IdealState mode"); Option resourceBucketSizeOption = OptionBuilder.withLongOpt(bucketSize) .withDescription("Specify size of a bucket, used with addResourceGroup command").create(); resourceBucketSizeOption.setArgs(1); resourceBucketSizeOption.setRequired(false); resourceBucketSizeOption.setArgName("Size of a bucket for a resource"); Option maxPartitionsPerNodeOption = OptionBuilder.withLongOpt(maxPartitionsPerNode) .withDescription("Specify max partitions per node, used with addResourceGroup command").create(); maxPartitionsPerNodeOption.setArgs(1); maxPartitionsPerNodeOption.setRequired(false); maxPartitionsPerNodeOption.setArgName("Max partitions per node for a resource"); Option resourceKeyOption = 
OptionBuilder.withLongOpt(resourceKeyPrefix) .withDescription("Specify resource key prefix, used with rebalance command").create(); resourceKeyOption.setArgs(1); resourceKeyOption.setRequired(false); resourceKeyOption.setArgName("Resource key prefix"); Option instanceGroupTagOption = OptionBuilder.withLongOpt(instanceGroupTag) .withDescription("Specify instance group tag, used with rebalance command").create(); instanceGroupTagOption.setArgs(1); instanceGroupTagOption.setRequired(false); instanceGroupTagOption.setArgName("Instance group tag"); Option addStateModelDefOption = OptionBuilder.withLongOpt(addStateModelDef) .withDescription("Add a State model to a cluster").create(); addStateModelDefOption.setArgs(2); addStateModelDefOption.setRequired(false); addStateModelDefOption.setArgName("clusterName <filename>"); Option addIdealStateOption = OptionBuilder.withLongOpt(addIdealState) .withDescription("Add a State model to a cluster").create(); addIdealStateOption.setArgs(3); addIdealStateOption.setRequired(false); addIdealStateOption.setArgName("clusterName resourceName <filename>"); Option dropInstanceOption = OptionBuilder.withLongOpt(dropInstance) .withDescription("Drop an existing Instance from a cluster").create(); dropInstanceOption.setArgs(2); dropInstanceOption.setRequired(false); dropInstanceOption.setArgName("clusterName InstanceId"); Option swapInstanceOption = OptionBuilder.withLongOpt(swapInstance) .withDescription("Swap an old instance from a cluster with a new instance").create(); swapInstanceOption.setArgs(3); swapInstanceOption.setRequired(false); swapInstanceOption.setArgName("clusterName oldInstance newInstance"); Option dropResourceOption = OptionBuilder.withLongOpt(dropResource) .withDescription("Drop an existing resource from a cluster").create(); dropResourceOption.setArgs(2); dropResourceOption.setRequired(false); dropResourceOption.setArgName("clusterName resourceName"); Option enableResourceOption = OptionBuilder.withLongOpt(enableResource) 
.withDescription("Enable/disable a resource").hasArgs(3).isRequired(false) .withArgName("clusterName resourceName true/false").create(); Option rebalanceOption = OptionBuilder.withLongOpt(rebalance) .withDescription("Rebalance a resource in a cluster").create(); rebalanceOption.setArgs(3); rebalanceOption.setRequired(false); rebalanceOption.setArgName("clusterName resourceName replicas"); Option instanceInfoOption = OptionBuilder.withLongOpt(listInstanceInfo) .withDescription("Query info of a Instance in a cluster").create(); instanceInfoOption.setArgs(2); instanceInfoOption.setRequired(false); instanceInfoOption.setArgName("clusterName InstanceName"); Option clusterInfoOption = OptionBuilder.withLongOpt(listClusterInfo) .withDescription("Query info of a cluster").create(); clusterInfoOption.setArgs(1); clusterInfoOption.setRequired(false); clusterInfoOption.setArgName("clusterName"); Option resourceInfoOption = OptionBuilder.withLongOpt(listResourceInfo) .withDescription("Query info of a resource").create(); resourceInfoOption.setArgs(2); resourceInfoOption.setRequired(false); resourceInfoOption.setArgName("clusterName resourceName"); Option addResourcePropertyOption = OptionBuilder.withLongOpt(addResourceProperty) .withDescription("Add a resource property").create(); addResourcePropertyOption.setArgs(4); addResourcePropertyOption.setRequired(false); addResourcePropertyOption.setArgName("clusterName resourceName propertyName propertyValue"); Option removeResourcePropertyOption = OptionBuilder.withLongOpt(removeResourceProperty) .withDescription("Remove a resource property").create(); removeResourcePropertyOption.setArgs(3); removeResourcePropertyOption.setRequired(false); removeResourcePropertyOption.setArgName("clusterName resourceName propertyName"); Option partitionInfoOption = OptionBuilder.withLongOpt(listPartitionInfo) .withDescription("Query info of a partition").create(); partitionInfoOption.setArgs(3); partitionInfoOption.setRequired(false); 
partitionInfoOption.setArgName("clusterName resourceName partitionName"); Option enableInstanceOption = OptionBuilder.withLongOpt(enableInstance) .withDescription("Enable/disable an instance").create(); enableInstanceOption.setArgs(3); enableInstanceOption.setRequired(false); enableInstanceOption.setArgName("clusterName instanceName true/false"); Option enablePartitionOption = OptionBuilder.hasArgs().withLongOpt(enablePartition) .withDescription("Enable/disable partitions").create(); enablePartitionOption.setRequired(false); enablePartitionOption.setArgName("true/false clusterName instanceName resourceName partitionName1..."); Option enableClusterOption = OptionBuilder.withLongOpt(enableCluster) .withDescription("pause/resume the controller of a cluster").create(); enableClusterOption.setArgs(2); enableClusterOption.setRequired(false); enableClusterOption.setArgName("clusterName true/false"); Option resetPartitionOption = OptionBuilder.withLongOpt(resetPartition) .withDescription("Reset a partition in error state").create(); resetPartitionOption.setArgs(4); resetPartitionOption.setRequired(false); resetPartitionOption.setArgName("clusterName instanceName resourceName partitionName"); Option resetInstanceOption = OptionBuilder.withLongOpt(resetInstance) .withDescription("Reset all partitions in error state for an instance").create(); resetInstanceOption.setArgs(2); resetInstanceOption.setRequired(false); resetInstanceOption.setArgName("clusterName instanceName"); Option resetResourceOption = OptionBuilder.withLongOpt(resetResource) .withDescription("Reset all partitions in error state for a resource").create(); resetResourceOption.setArgs(2); resetResourceOption.setRequired(false); resetResourceOption.setArgName("clusterName resourceName"); Option listStateModelsOption = OptionBuilder.withLongOpt(listStateModels) .withDescription("Query info of state models in a cluster").create(); listStateModelsOption.setArgs(1); listStateModelsOption.setRequired(false); 
listStateModelsOption.setArgName("clusterName"); Option listStateModelOption = OptionBuilder.withLongOpt(listStateModel) .withDescription("Query info of a state model in a cluster").create(); listStateModelOption.setArgs(2); listStateModelOption.setRequired(false); listStateModelOption.setArgName("clusterName stateModelName"); Option addInstanceTagOption = OptionBuilder.withLongOpt(addInstanceTag) .withDescription("Add a tag to instance").create(); addInstanceTagOption.setArgs(3); addInstanceTagOption.setRequired(false); addInstanceTagOption.setArgName("clusterName instanceName tag"); Option removeInstanceTagOption = OptionBuilder.withLongOpt(removeInstanceTag) .withDescription("Remove tag from instance").create(); removeInstanceTagOption.setArgs(3); removeInstanceTagOption.setRequired(false); removeInstanceTagOption.setArgName("clusterName instanceName tag"); // TODO need deal with resource-names containing "," // set/get/remove configs options Option setConfOption = OptionBuilder.hasArgs(3).isRequired(false).withArgName( "ConfigScope(e.g. RESOURCE) ConfigScopeArgs(e.g. myCluster,testDB) KeyValueMap(e.g. k1=v1,k2=v2)") .withLongOpt(setConfig).withDescription("Set configs").create(); Option getConfOption = OptionBuilder.hasArgs(3).isRequired(false) .withArgName("ConfigScope(e.g. RESOURCE) ConfigScopeArgs(e.g. myCluster,testDB) Keys(e.g. k1,k2)") .withLongOpt(getConfig).withDescription("Get configs").create(); Option removeConfOption = OptionBuilder.hasArgs(3).isRequired(false) .withArgName("ConfigScope(e.g. RESOURCE) ConfigScopeArgs(e.g. myCluster,testDB) Keys(e.g. k1,k2)") .withLongOpt(removeConfig).withDescription("Remove configs").create(); // set/get/remove constraints options Option setConstraintOption = OptionBuilder.hasArgs(4).isRequired(false).withArgName( "clusterName ConstraintType(e.g. MESSAGE_CONSTRAINT) ConstraintId KeyValueMap(e.g. k1=v1,k2=v2)") .withLongOpt(setConstraint) .withDescription("Set a constraint associated with a give id. 
create if not exist").create(); Option getConstraintsOption = OptionBuilder.hasArgs(2).isRequired(false) .withArgName("clusterName ConstraintType(e.g. MESSAGE_CONSTRAINT)").withLongOpt(getConstraints) .withDescription("Get constraints associated with given type").create(); Option removeConstraintOption = OptionBuilder.hasArgs(3).isRequired(false) .withArgName("clusterName ConstraintType(e.g. MESSAGE_CONSTRAINT) ConstraintId") .withLongOpt(removeConstraint).withDescription("Remove a constraint associated with given id") .create(); OptionGroup group = new OptionGroup(); group.setRequired(true); group.addOption(rebalanceOption); group.addOption(addResourceOption); group.addOption(resourceModeOption); group.addOption(resourceBucketSizeOption); group.addOption(maxPartitionsPerNodeOption); group.addOption(expandResourceOption); group.addOption(expandClusterOption); group.addOption(resourceKeyOption); group.addOption(addClusterOption); group.addOption(activateClusterOption); group.addOption(deleteClusterOption); group.addOption(addInstanceOption); group.addOption(listInstancesOption); group.addOption(listResourceOption); group.addOption(listClustersOption); group.addOption(addIdealStateOption); group.addOption(rebalanceOption); group.addOption(dropInstanceOption); group.addOption(swapInstanceOption); group.addOption(dropResourceOption); group.addOption(enableResourceOption); group.addOption(instanceInfoOption); group.addOption(clusterInfoOption); group.addOption(resourceInfoOption); group.addOption(partitionInfoOption); group.addOption(enableInstanceOption); group.addOption(enablePartitionOption); group.addOption(enableClusterOption); group.addOption(resetPartitionOption); group.addOption(resetInstanceOption); group.addOption(resetResourceOption); group.addOption(addStateModelDefOption); group.addOption(listStateModelsOption); group.addOption(listStateModelOption); group.addOption(addResourcePropertyOption); group.addOption(removeResourcePropertyOption); // set/get/remove 
config options group.addOption(setConfOption); group.addOption(getConfOption); group.addOption(removeConfOption); // set/get/remove constraint options group.addOption(setConstraintOption); group.addOption(getConstraintsOption); group.addOption(removeConstraintOption); group.addOption(addInstanceTagOption); group.addOption(removeInstanceTagOption); group.addOption(instanceGroupTagOption); Options options = new Options(); options.addOption(helpOption); options.addOption(zkServerOption); options.addOptionGroup(group); return options; }
From source file:org.apache.helix.tools.IntegrationTestUtil.java
@SuppressWarnings("static-access") static Options constructCommandLineOptions() { Option helpOption = OptionBuilder.withLongOpt(help) .withDescription("Prints command-line options information").create(); Option zkSvrOption = OptionBuilder.hasArgs(1).isRequired(true).withArgName("zookeeperAddress") .withLongOpt(zkSvr).withDescription("Provide zookeeper-address").create(); Option verifyExternalViewOption = OptionBuilder.hasArgs().isRequired(false) .withArgName("clusterName node1 node2..").withLongOpt(verifyExternalView) .withDescription("Verify external-view").create(); Option verifyLiveNodesOption = OptionBuilder.hasArg().isRequired(false) .withArgName("clusterName node1, node2..").withLongOpt(verifyLiveNodes) .withDescription("Verify live-nodes").create(); Option readZNodeOption = OptionBuilder.hasArgs(1).isRequired(false).withArgName("zkPath") .withLongOpt(readZNode).withDescription("Read znode").create(); Option readLeaderOption = OptionBuilder.hasArgs(1).isRequired(false).withArgName("clusterName") .withLongOpt(readLeader).withDescription("Read cluster controller").create(); OptionGroup optGroup = new OptionGroup(); optGroup.setRequired(true);//from w ww .ja va2s .com optGroup.addOption(verifyExternalViewOption); optGroup.addOption(verifyLiveNodesOption); optGroup.addOption(readZNodeOption); optGroup.addOption(readLeaderOption); Options options = new Options(); options.addOption(helpOption); options.addOption(zkSvrOption); options.addOptionGroup(optGroup); return options; }
From source file:org.apache.helix.tools.ZkGrep.java
/**
 * Builds the command-line options for ZkGrep: an optional zoo.cfg location,
 * required grep patterns, and exactly one time selector (--between or --by)
 * enforced via a required OptionGroup.
 */
@SuppressWarnings("static-access")
private static Options constructCommandLineOptions() {
  Option zkCfgOption = OptionBuilder.hasArgs(1).isRequired(false).withLongOpt(zkCfg)
      .withArgName("zoo.cfg").withDescription("provide zoo.cfg").create();

  Option patternOption = OptionBuilder.hasArgs().isRequired(true).withLongOpt(pattern)
      .withArgName("grep-patterns...").withDescription("provide patterns (required)").create();

  Option betweenOption = OptionBuilder.hasArgs(2).isRequired(false).withLongOpt(between)
      .withArgName("t1 t2 (timestamp in ms or yyMMdd_hhmmss_SSS)")
      .withDescription("grep between t1 and t2").create();

  Option byOption = OptionBuilder.hasArgs(1).isRequired(false).withLongOpt(by)
      .withArgName("t (timestamp in ms or yyMMdd_hhmmss_SSS)")
      .withDescription("grep by t").create();

  OptionGroup timeSelector = new OptionGroup();
  timeSelector.setRequired(true);
  timeSelector.addOption(betweenOption);
  timeSelector.addOption(byOption);

  Options options = new Options();
  options.addOption(zkCfgOption);
  options.addOption(patternOption);
  options.addOptionGroup(timeSelector);
  return options;
}
From source file:org.apache.hive.beeline.HiveSchemaTool.java
@SuppressWarnings("static-access") private static void initOptions(Options cmdLineOptions) { Option help = new Option("help", "print this message"); Option upgradeOpt = new Option("upgradeSchema", "Schema upgrade"); Option upgradeFromOpt = OptionBuilder.withArgName("upgradeFrom").hasArg() .withDescription("Schema upgrade from a version").create("upgradeSchemaFrom"); Option initOpt = new Option("initSchema", "Schema initialization"); Option initToOpt = OptionBuilder.withArgName("initTo").hasArg() .withDescription("Schema initialization to a version").create("initSchemaTo"); Option infoOpt = new Option("info", "Show config and schema details"); Option validateOpt = new Option("validate", "Validate the database"); OptionGroup optGroup = new OptionGroup(); optGroup.addOption(upgradeOpt).addOption(initOpt).addOption(help).addOption(upgradeFromOpt) .addOption(initToOpt).addOption(infoOpt).addOption(validateOpt); optGroup.setRequired(true);/* w w w . j a v a2 s . c o m*/ Option userNameOpt = OptionBuilder.withArgName("user").hasArgs() .withDescription("Override config file user name").create("userName"); Option passwdOpt = OptionBuilder.withArgName("password").hasArgs() .withDescription("Override config file password").create("passWord"); Option dbTypeOpt = OptionBuilder.withArgName("databaseType").hasArgs() .withDescription("Metastore database type").create("dbType"); Option metaDbTypeOpt = OptionBuilder.withArgName("metaDatabaseType").hasArgs() .withDescription("Used only if upgrading the system catalog for hive").create("metaDbType"); Option urlOpt = OptionBuilder.withArgName("url").hasArgs().withDescription("connection url to the database") .create("url"); Option driverOpt = OptionBuilder.withArgName("driver").hasArgs() .withDescription("driver name for connection").create("driver"); Option dbOpts = OptionBuilder.withArgName("databaseOpts").hasArgs() .withDescription("Backend DB specific options").create("dbOpts"); Option dryRunOpt = new Option("dryRun", "list SQL 
scripts (no execute)"); Option verboseOpt = new Option("verbose", "only print SQL statements"); Option serversOpt = OptionBuilder.withArgName("serverList").hasArgs().withDescription( "a comma-separated list of servers used in location validation in the format of scheme://authority (e.g. hdfs://localhost:8000)") .create("servers"); cmdLineOptions.addOption(help); cmdLineOptions.addOption(dryRunOpt); cmdLineOptions.addOption(userNameOpt); cmdLineOptions.addOption(passwdOpt); cmdLineOptions.addOption(dbTypeOpt); cmdLineOptions.addOption(verboseOpt); cmdLineOptions.addOption(metaDbTypeOpt); cmdLineOptions.addOption(urlOpt); cmdLineOptions.addOption(driverOpt); cmdLineOptions.addOption(dbOpts); cmdLineOptions.addOption(serversOpt); cmdLineOptions.addOptionGroup(optGroup); }