List of usage examples for org.apache.commons.cli Option setRequired
public void setRequired(boolean required)
From source file:org.apache.hadoop.hbase.regionserver.TestJoinedScanners.java
/** * Command line interface:/*from www .ja v a 2 s . c o m*/ * @param args * @throws IOException if there is a bug while reading from disk */ public static void main(final String[] args) throws Exception { Option encodingOption = new Option("e", "blockEncoding", true, "Data block encoding; Default: FAST_DIFF"); encodingOption.setRequired(false); options.addOption(encodingOption); Option ratioOption = new Option("r", "selectionRatio", true, "Ratio of selected rows using essential column family"); ratioOption.setRequired(false); options.addOption(ratioOption); Option widthOption = new Option("w", "valueWidth", true, "Width of value for non-essential column family"); widthOption.setRequired(false); options.addOption(widthOption); CommandLineParser parser = new GnuParser(); CommandLine cmd = parser.parse(options, args); if (args.length < 1) { HelpFormatter formatter = new HelpFormatter(); formatter.printHelp("TestJoinedScanners", options, true); } if (cmd.hasOption("e")) { blockEncoding = DataBlockEncoding.valueOf(cmd.getOptionValue("e")); } if (cmd.hasOption("r")) { selectionRatio = Integer.parseInt(cmd.getOptionValue("r")); } if (cmd.hasOption("w")) { valueWidth = Integer.parseInt(cmd.getOptionValue("w")); } // run the test TestJoinedScanners test = new TestJoinedScanners(); test.testJoinedScanners(); }
From source file:org.apache.hadoop.hbase.util.HFileV1Detector.java
public HFileV1Detector() { Option pathOption = new Option("p", "path", true, "Path to a table, or hbase installation"); pathOption.setRequired(false); options.addOption(pathOption);//from w ww . j av a2 s .co m Option threadOption = new Option("n", "numberOfThreads", true, "Number of threads to use while processing HFiles."); threadOption.setRequired(false); options.addOption(threadOption); options.addOption("h", "help", false, "Help"); }
From source file:org.apache.hadoop.net.PodCIDRToNodeMapping.java
/**
 * Stand-alone resolver: parses the required -n &lt;name&gt; flag, switches on
 * debug logging, resolves the name through the plugin and logs the result.
 *
 * @param args command line arguments
 * @throws ParseException if the required option is missing or malformed
 */
public static void main(String[] args) throws ParseException {
    final Options options = new Options();
    final Option nameOption = new Option("n", true, "Name to resolve");
    nameOption.setRequired(true);
    options.addOption(nameOption);

    final CommandLine cmd = new BasicParser().parse(options, args);

    // Verbose logging so the resolution path is visible on the console.
    BasicConfigurator.configure();
    Logger.getRootLogger().setLevel(Level.DEBUG);

    final PodCIDRToNodeMapping plugin = new PodCIDRToNodeMapping();
    plugin.setConf(new Configuration());

    final String nameToResolve = cmd.getOptionValue(nameOption.getOpt());
    final List<String> networkPathDirs = plugin.resolve(Lists.newArrayList(nameToResolve));
    log.info("Resolved " + nameToResolve + " to " + networkPathDirs);
}
From source file:org.apache.hadoop.tools.HadoopArchiveLogsRunner.java
/**
 * Parses the runner's command line: five required string options plus an
 * optional no-proxy flag, storing each value into the corresponding field.
 *
 * @param args command line arguments
 * @throws ParseException if a required option is missing or malformed
 */
private void handleOpts(String[] args) throws ParseException {
    final Options opts = new Options();

    final Option appIdOpt = new Option(APP_ID_OPTION, true, "Application ID");
    appIdOpt.setRequired(true);
    opts.addOption(appIdOpt);

    final Option userOpt = new Option(USER_OPTION, true, "User");
    userOpt.setRequired(true);
    opts.addOption(userOpt);

    final Option workingDirOpt = new Option(WORKING_DIR_OPTION, true, "Working Directory");
    workingDirOpt.setRequired(true);
    opts.addOption(workingDirOpt);

    final Option remoteLogDirOpt = new Option(REMOTE_ROOT_LOG_DIR_OPTION, true, "Remote Root Log Directory");
    remoteLogDirOpt.setRequired(true);
    opts.addOption(remoteLogDirOpt);

    final Option suffixOpt = new Option(SUFFIX_OPTION, true, "Suffix");
    suffixOpt.setRequired(true);
    opts.addOption(suffixOpt);

    // Flag only (no argument): its presence disables the proxy below.
    opts.addOption(new Option(NO_PROXY_OPTION, false, "Use Proxy"));

    final CommandLine commandLine = new GnuParser().parse(opts, args);
    appId = commandLine.getOptionValue(APP_ID_OPTION);
    user = commandLine.getOptionValue(USER_OPTION);
    workingDir = commandLine.getOptionValue(WORKING_DIR_OPTION);
    remoteLogDir = commandLine.getOptionValue(REMOTE_ROOT_LOG_DIR_OPTION);
    suffix = commandLine.getOptionValue(SUFFIX_OPTION);
    proxy = !commandLine.hasOption(NO_PROXY_OPTION);
}
From source file:org.apache.hadoop.yarn.client.cli.LogsCLI.java
/**
 * Entry point for `yarn logs`: builds and parses the CLI options, then
 * fetches container/AM logs from the NodeManager web service (running apps)
 * or directly from the aggregated-log filesystem (finished apps), falling
 * back through RM/AHS web services to locate containers.
 *
 * NOTE(review): control flow here is intricate (option parsing, AM-container
 * selection, and three distinct retrieval paths depend on exact statement
 * order and application state); code left byte-identical, comment only.
 * Returns 0 on success, -1 on any usage or retrieval error.
 */
@Override public int run(String[] args) throws Exception { Options opts = new Options(); opts.addOption(HELP_CMD, false, "Displays help for all commands."); Option appIdOpt = new Option(APPLICATION_ID_OPTION, true, "ApplicationId (required)"); appIdOpt.setRequired(true); opts.addOption(appIdOpt);/*from w ww. j av a 2 s . c om*/ opts.addOption(CONTAINER_ID_OPTION, true, "ContainerId. " + "By default, it will only print syslog if the application is runing." + " Work with -logFiles to get other logs."); opts.addOption(NODE_ADDRESS_OPTION, true, "NodeAddress in the format " + "nodename:port"); opts.addOption(APP_OWNER_OPTION, true, "AppOwner (assumed to be current user if not specified)"); Option amOption = new Option(AM_CONTAINER_OPTION, true, "Prints the AM Container logs for this application. " + "Specify comma-separated value to get logs for related AM Container. " + "For example, If we specify -am 1,2, we will get the logs for " + "the first AM Container as well as the second AM Container. " + "To get logs for all AM Containers, use -am ALL. " + "To get logs for the latest AM Container, use -am -1. " + "By default, it will only print out syslog. Work with -logFiles " + "to get other logs"); amOption.setValueSeparator(','); amOption.setArgs(Option.UNLIMITED_VALUES); amOption.setArgName("AM Containers"); opts.addOption(amOption); Option logFileOpt = new Option(CONTAINER_LOG_FILES, true, "Work with -am/-containerId and specify comma-separated value " + "to get specified container log files. 
Use \"ALL\" to fetch all the " + "log files for the container."); logFileOpt.setValueSeparator(','); logFileOpt.setArgs(Option.UNLIMITED_VALUES); logFileOpt.setArgName("Log File Name"); opts.addOption(logFileOpt); opts.getOption(APPLICATION_ID_OPTION).setArgName("Application ID"); opts.getOption(CONTAINER_ID_OPTION).setArgName("Container ID"); opts.getOption(NODE_ADDRESS_OPTION).setArgName("Node Address"); opts.getOption(APP_OWNER_OPTION).setArgName("Application Owner"); opts.getOption(AM_CONTAINER_OPTION).setArgName("AM Containers"); Options printOpts = new Options(); printOpts.addOption(opts.getOption(HELP_CMD)); printOpts.addOption(opts.getOption(CONTAINER_ID_OPTION)); printOpts.addOption(opts.getOption(NODE_ADDRESS_OPTION)); printOpts.addOption(opts.getOption(APP_OWNER_OPTION)); printOpts.addOption(opts.getOption(AM_CONTAINER_OPTION)); printOpts.addOption(opts.getOption(CONTAINER_LOG_FILES)); if (args.length < 1) { printHelpMessage(printOpts); return -1; } if (args[0].equals("-help")) { printHelpMessage(printOpts); return 0; } CommandLineParser parser = new GnuParser(); String appIdStr = null; String containerIdStr = null; String nodeAddress = null; String appOwner = null; boolean getAMContainerLogs = false; String[] logFiles = null; List<String> amContainersList = new ArrayList<String>(); try { CommandLine commandLine = parser.parse(opts, args, true); appIdStr = commandLine.getOptionValue(APPLICATION_ID_OPTION); containerIdStr = commandLine.getOptionValue(CONTAINER_ID_OPTION); nodeAddress = commandLine.getOptionValue(NODE_ADDRESS_OPTION); appOwner = commandLine.getOptionValue(APP_OWNER_OPTION); getAMContainerLogs = commandLine.hasOption(AM_CONTAINER_OPTION); if (getAMContainerLogs) { String[] amContainers = commandLine.getOptionValues(AM_CONTAINER_OPTION); for (String am : amContainers) { boolean errorInput = false; if (!am.trim().equalsIgnoreCase("ALL")) { try { int id = Integer.parseInt(am.trim()); if (id != -1 && id <= 0) { errorInput = true; } } catch 
(NumberFormatException ex) { errorInput = true; } if (errorInput) { System.err.println("Invalid input for option -am. Valid inputs are 'ALL', -1 " + "and any other integer which is larger than 0."); printHelpMessage(printOpts); return -1; } amContainersList.add(am.trim()); } else { amContainersList.add("ALL"); break; } } } if (commandLine.hasOption(CONTAINER_LOG_FILES)) { logFiles = commandLine.getOptionValues(CONTAINER_LOG_FILES); } } catch (ParseException e) { System.err.println("options parsing failed: " + e.getMessage()); printHelpMessage(printOpts); return -1; } if (appIdStr == null) { System.err.println("ApplicationId cannot be null!"); printHelpMessage(printOpts); return -1; } ApplicationId appId = null; try { appId = ApplicationId.fromString(appIdStr); } catch (Exception e) { System.err.println("Invalid ApplicationId specified"); return -1; } LogCLIHelpers logCliHelper = new LogCLIHelpers(); logCliHelper.setConf(getConf()); if (appOwner == null || appOwner.isEmpty()) { appOwner = UserGroupInformation.getCurrentUser().getShortUserName(); } YarnApplicationState appState = YarnApplicationState.NEW; try { appState = getApplicationState(appId); if (appState == YarnApplicationState.NEW || appState == YarnApplicationState.NEW_SAVING || appState == YarnApplicationState.SUBMITTED) { System.out.println("Logs are not avaiable right now."); return -1; } } catch (IOException | YarnException e) { System.err.println( "Unable to get ApplicationState." + " Attempting to fetch logs directly from the filesystem."); } // To get am logs if (getAMContainerLogs) { // if we do not specify the value for CONTAINER_LOG_FILES option, // we will only output syslog if (logFiles == null || logFiles.length == 0) { logFiles = new String[] { "syslog" }; } // If the application is running, we will call the RM WebService // to get the AppAttempts which includes the nodeHttpAddress // and containerId for all the AM Containers. 
// After that, we will call NodeManager webService to get the // related logs if (appState == YarnApplicationState.ACCEPTED || appState == YarnApplicationState.RUNNING) { return printAMContainerLogs(getConf(), appIdStr, amContainersList, logFiles, logCliHelper, appOwner, false); } else { // If the application is in the final state, we will call RM webservice // to get all AppAttempts information first. If we get nothing, // we will try to call AHS webservice to get related AppAttempts // which includes nodeAddress for the AM Containers. // After that, we will use nodeAddress and containerId // to get logs from HDFS directly. if (getConf().getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) { return printAMContainerLogs(getConf(), appIdStr, amContainersList, logFiles, logCliHelper, appOwner, true); } else { System.out.println("Can not get AMContainers logs for the application:" + appId); System.out.println("This application:" + appId + " is finished." + " Please enable the application history service. Or Using " + "yarn logs -applicationId <appId> -containerId <containerId> " + "--nodeAddress <nodeHttpAddress> to get the container logs"); return -1; } } } int resultCode = 0; if (containerIdStr != null) { // if we provide the node address and the application is in the final // state, we could directly get logs from HDFS. if (nodeAddress != null && isApplicationFinished(appState)) { // if user specified "ALL" as the logFiles param, pass null // to logCliHelper so that it fetches all the logs List<String> logs; if (logFiles == null) { logs = null; } else if (fetchAllLogFiles(logFiles)) { logs = null; } else { logs = Arrays.asList(logFiles); } return logCliHelper.dumpAContainersLogsForALogType(appIdStr, containerIdStr, nodeAddress, appOwner, logs); } try { // If the nodeAddress is not provided, we will try to get // the ContainerReport. 
In the containerReport, we could get // nodeAddress and nodeHttpAddress ContainerReport report = getContainerReport(containerIdStr); String nodeHttpAddress = report.getNodeHttpAddress() .replaceFirst(WebAppUtils.getHttpSchemePrefix(getConf()), ""); String nodeId = report.getAssignedNode().toString(); // If the application is not in the final state, // we will provide the NodeHttpAddress and get the container logs // by calling NodeManager webservice. if (!isApplicationFinished(appState)) { if (logFiles == null || logFiles.length == 0) { logFiles = new String[] { "syslog" }; } printContainerLogsFromRunningApplication(getConf(), appIdStr, containerIdStr, nodeHttpAddress, nodeId, logFiles, logCliHelper, appOwner); } else { String[] requestedLogFiles = logFiles; if (fetchAllLogFiles(logFiles)) { requestedLogFiles = null; } // If the application is in the final state, we will directly // get the container logs from HDFS. printContainerLogsForFinishedApplication(appIdStr, containerIdStr, nodeId, requestedLogFiles, logCliHelper, appOwner); } return resultCode; } catch (IOException | YarnException ex) { System.err.println( "Unable to get logs for this container:" + containerIdStr + "for the application:" + appId); if (!getConf().getBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, YarnConfiguration.DEFAULT_APPLICATION_HISTORY_ENABLED)) { System.out.println("Please enable the application history service. Or "); } System.out.println("Using " + "yarn logs -applicationId <appId> -containerId <containerId> " + "--nodeAddress <nodeHttpAddress> to get the container logs"); return -1; } } else { if (nodeAddress == null) { resultCode = logCliHelper.dumpAllContainersLogs(appId, appOwner, System.out); } else { System.out.println("Should at least provide ContainerId!"); printHelpMessage(printOpts); resultCode = -1; } } return resultCode; }
From source file:org.apache.hc.core5.benchmark.CommandLineUtils.java
public static Options getOptions() { final Option nopt = new Option("n", true, "Number of requests to perform. " + "The default is to just perform a single request which may lead " + "to non-representative benchmarking results"); nopt.setRequired(false); nopt.setArgName("requests"); final Option copt = new Option("c", true, "Number of multiple requests to make at a time. " + "The default is to just execute a single request"); copt.setRequired(false);//from w w w. j a va 2 s .c om copt.setArgName("concurrency"); final Option topt = new Option("t", true, "Seconds to max. to spend on benchmarking"); topt.setRequired(false); topt.setArgName("time-limit"); final Option sopt = new Option("s", true, "Seconds to max. wait for each response. Default is 60 seconds"); sopt.setRequired(false); sopt.setArgName("socket-Timeout"); final Option popt = new Option("p", true, "File containing data to enclose in the request"); popt.setRequired(false); popt.setArgName("Payload file"); final Option Topt = new Option("T", true, "Content-type header to use for enclosed request data"); Topt.setRequired(false); Topt.setArgName("content-type"); final Option vopt = new Option("v", true, "Set verbosity level: " + "1 prints warnings and errors, " + "2 prints response codes, " + "3 prints message headers, " + "4 prints HTTP/2 frame info, " + "5 prints HTTP/2 flow control events, " + "6 prints response content"); vopt.setRequired(false); vopt.setArgName("verbosity"); final Option iopt = new Option("i", false, "Use HEAD instead of GET"); iopt.setRequired(false); final Option Hopt = new Option("H", true, "Add arbitrary header line, " + "eg. 'Accept-Encoding: gzip' inserted after all normal " + "header lines. (repeatable as -H \"h1: v1\",\"h2: v2\" etc)"); Hopt.setRequired(false); Hopt.setArgName("header"); final Option kopt = new Option("k", false, "Use HTTP KeepAlive feature. Default is no KeepAlive"); kopt.setRequired(false); final Option mopt = new Option("m", true, "HTTP Method. 
Default is GET or POST if the request to enclose data"); mopt.setRequired(false); mopt.setArgName("HTTP method"); // HttpCore specific options final Option uopt = new Option("u", false, "Chunk entity. Default is false"); uopt.setRequired(false); final Option xopt = new Option("x", false, "Use Expect-Continue. Default is false"); xopt.setRequired(false); final Option gopt = new Option("g", false, "Accept GZip. Default is false"); gopt.setRequired(false); final Option http2opt = new Option("2", false, "Force HTTP/2"); gopt.setRequired(false); final Option hopt = new Option("h", false, "Display usage information"); nopt.setRequired(false); final Options options = new Options(); options.addOption(nopt); options.addOption(copt); options.addOption(topt); options.addOption(sopt); options.addOption(popt); options.addOption(Topt); options.addOption(vopt); options.addOption(iopt); options.addOption(Hopt); options.addOption(kopt); options.addOption(mopt); // HttpCore specific options options.addOption(uopt); options.addOption(xopt); options.addOption(gopt); options.addOption(http2opt); options.addOption(hopt); return options; }
From source file:org.apache.hc.core5.http.benchmark.CommandLineUtils.java
public static Options getOptions() { final Option iopt = new Option("i", false, "Do HEAD requests instead of GET (deprecated)"); iopt.setRequired(false); final Option oopt = new Option("o", false, "Use HTTP/S 1.0 instead of 1.1 (default)"); oopt.setRequired(false);// w w w. ja v a 2 s .co m final Option kopt = new Option("k", false, "Enable the HTTP KeepAlive feature, " + "i.e., perform multiple requests within one HTTP session. " + "Default is no KeepAlive"); kopt.setRequired(false); final Option uopt = new Option("u", false, "Chunk entity. Default is false"); uopt.setRequired(false); final Option xopt = new Option("x", false, "Use Expect-Continue. Default is false"); xopt.setRequired(false); final Option gopt = new Option("g", false, "Accept GZip. Default is false"); gopt.setRequired(false); final Option nopt = new Option("n", true, "Number of requests to perform for the " + "benchmarking session. The default is to just perform a single " + "request which usually leads to non-representative benchmarking " + "results"); nopt.setRequired(false); nopt.setArgName("requests"); final Option copt = new Option("c", true, "Concurrency while performing the " + "benchmarking session. The default is to just use a single thread/client"); copt.setRequired(false); copt.setArgName("concurrency"); final Option popt = new Option("p", true, "File containing data to POST or PUT"); popt.setRequired(false); popt.setArgName("Payload file"); final Option mopt = new Option("m", true, "HTTP Method. Default is POST. 
" + "Possible options are GET, POST, PUT, DELETE, HEAD, OPTIONS, TRACE"); mopt.setRequired(false); mopt.setArgName("HTTP method"); final Option Topt = new Option("T", true, "Content-type header to use for POST/PUT data"); Topt.setRequired(false); Topt.setArgName("content-type"); final Option topt = new Option("t", true, "Client side socket timeout (in ms) - default 60 Secs"); topt.setRequired(false); topt.setArgName("socket-Timeout"); final Option tlopt = new Option("l", true, "Time limit for the test to run (default is infinity)"); tlopt.setRequired(false); tlopt.setArgName("time-limit"); final Option Hopt = new Option("H", true, "Add arbitrary header line, " + "eg. 'Accept-Encoding: gzip' inserted after all normal " + "header lines. (repeatable as -H \"h1: v1\",\"h2: v2\" etc)"); Hopt.setRequired(false); Hopt.setArgName("header"); final Option vopt = new Option("v", true, "Set verbosity level - 4 and above " + "prints response content, 3 and above prints " + "information on headers, 2 and above prints response codes (404, 200, " + "etc.), 1 and above prints warnings and info"); vopt.setRequired(false); vopt.setArgName("verbosity"); final Option hopt = new Option("h", false, "Display usage information"); nopt.setRequired(false); final Options options = new Options(); options.addOption(iopt); options.addOption(mopt); options.addOption(uopt); options.addOption(xopt); options.addOption(gopt); options.addOption(kopt); options.addOption(nopt); options.addOption(copt); options.addOption(popt); options.addOption(Topt); options.addOption(vopt); options.addOption(Hopt); options.addOption(hopt); options.addOption(topt); options.addOption(oopt); options.addOption(tlopt); return options; }
From source file:org.apache.helix.controller.HelixControllerMain.java
/**
 * Assembles the controller's command line options: required zookeeper
 * address and cluster name, plus optional controller mode and name.
 *
 * @return the populated Options instance
 */
@SuppressWarnings("static-access")
synchronized private static Options constructCommandLineOptions() {
    final Options options = new Options();

    final Option helpOpt = OptionBuilder.withLongOpt(help)
            .withDescription("Prints command-line options info").create();
    options.addOption(helpOpt);

    final Option zkOpt = OptionBuilder.withLongOpt(zkServerAddress)
            .withDescription("Provide zookeeper address").create();
    zkOpt.setArgs(1);
    zkOpt.setRequired(true);
    zkOpt.setArgName("ZookeeperServerAddress(Required)");
    options.addOption(zkOpt);

    final Option clusterOpt = OptionBuilder.withLongOpt(cluster)
            .withDescription("Provide cluster name").create();
    clusterOpt.setArgs(1);
    clusterOpt.setRequired(true);
    clusterOpt.setArgName("Cluster name (Required)");
    options.addOption(clusterOpt);

    final Option modeOpt = OptionBuilder.withLongOpt(mode)
            .withDescription("Provide cluster controller mode (Optional): STANDALONE (default) or DISTRIBUTED")
            .create();
    modeOpt.setArgs(1);
    modeOpt.setRequired(false);
    modeOpt.setArgName("Cluster controller mode (Optional)");
    options.addOption(modeOpt);

    final Option nameOpt = OptionBuilder.withLongOpt(name)
            .withDescription("Provide cluster controller name (Optional)").create();
    nameOpt.setArgs(1);
    nameOpt.setRequired(false);
    nameOpt.setArgName("Cluster controller name (Optional)");
    options.addOption(nameOpt);

    return options;
}
From source file:org.apache.helix.mock.participant.DummyProcess.java
@SuppressWarnings("static-access") synchronized private static Options constructCommandLineOptions() { Option helpOption = OptionBuilder.withLongOpt(help).withDescription("Prints command-line options info") .create();// w w w. j ava 2 s.c o m Option clusterOption = OptionBuilder.withLongOpt(cluster).withDescription("Provide cluster name").create(); clusterOption.setArgs(1); clusterOption.setRequired(true); clusterOption.setArgName("Cluster name (Required)"); Option hostOption = OptionBuilder.withLongOpt(hostAddress).withDescription("Provide host name").create(); hostOption.setArgs(1); hostOption.setRequired(true); hostOption.setArgName("Host name (Required)"); Option portOption = OptionBuilder.withLongOpt(hostPort).withDescription("Provide host port").create(); portOption.setArgs(1); portOption.setRequired(true); portOption.setArgName("Host port (Required)"); Option cmTypeOption = OptionBuilder.withLongOpt(helixManagerType) .withDescription("Provide cluster manager type (e.g. 'zk', 'static-file', or 'dynamic-file'") .create(); cmTypeOption.setArgs(1); cmTypeOption.setRequired(true); cmTypeOption.setArgName("Clsuter manager type (e.g. 
'zk', 'static-file', or 'dynamic-file') (Required)"); Option zkServerOption = OptionBuilder.withLongOpt(zkServer).withDescription("Provide zookeeper address") .create(); zkServerOption.setArgs(1); zkServerOption.setRequired(true); zkServerOption.setArgName("ZookeeperServerAddress(Required for zk-based cluster manager)"); // Option rootNsOption = OptionBuilder.withLongOpt(rootNamespace) // .withDescription("Provide root namespace for dynamic-file based cluster manager").create(); // rootNsOption.setArgs(1); // rootNsOption.setRequired(true); // rootNsOption.setArgName("Root namespace (Required for dynamic-file based cluster manager)"); Option transDelayOption = OptionBuilder.withLongOpt(transDelay).withDescription("Provide state trans delay") .create(); transDelayOption.setArgs(1); transDelayOption.setRequired(false); transDelayOption.setArgName("Delay time in state transition, in MS"); OptionGroup optionGroup = new OptionGroup(); optionGroup.addOption(zkServerOption); Options options = new Options(); options.addOption(helpOption); options.addOption(clusterOption); options.addOption(hostOption); options.addOption(portOption); options.addOption(transDelayOption); options.addOption(cmTypeOption); options.addOptionGroup(optionGroup); return options; }