Example usage for org.apache.commons.cli Option setArgName

List of usage examples for org.apache.commons.cli Option setArgName

Introduction

In this page you can find the example usage for org.apache.commons.cli Option setArgName.

Prototype

public void setArgName(String argName) 

Source Link

Document

Sets the display name for the argument value.

Usage

From source file:org.apache.metron.stellar.common.shell.cli.StellarShell.java

/**
 * Defines the command line options accepted by the Stellar shell.
 *
 * @return The valid command line options.
 */
private Options defineCommandLineOptions() {
    // Options.addOption returns the Options instance, so the simple options can be chained.
    Options options = new Options()
            .addOption("z", "zookeeper", true, "Zookeeper URL fragment in the form [HOSTNAME|IPADDRESS]:PORT")
            .addOption("v", "variables", true, "File containing a JSON Map of variables")
            .addOption("irc", "inputrc", true, "File containing the inputrc if not the default ~/.inputrc")
            .addOption("na", "no_ansi", false, "Make the input prompt not use ANSI colors.")
            .addOption("h", "help", false, "Print help")
            .addOption("p", "properties", true, "File containing Stellar properties");

    // the log4j option carries a display name (<FILE>) for its argument in the help output
    Option log4jOption = new Option("l", "log4j", true, "The log4j properties file to load");
    log4jOption.setArgName("FILE");
    log4jOption.setRequired(false);
    options.addOption(log4jOption);

    return options;
}

From source file:org.apache.metron.utils.SourceConfigUtils.java

/**
 * Command line entry point: optionally pushes source configuration files from a local
 * directory into Zookeeper, then dumps the configurations currently stored there.
 * <p>
 * Exits with status -1 on a parse failure or any unexpected error, and 0 after
 * printing help when -h is supplied.
 *
 * @param args command line arguments; -z (Zookeeper quorum) is required,
 *             -p (config file directory) and -h (help) are optional
 */
public static void main(String[] args) {

    Options options = new Options();
    {
        Option o = new Option("h", "help", false, "This screen");
        o.setRequired(false);
        options.addOption(o);
    }
    {
        Option o = new Option("p", "config_files", true,
                "Path to the source config files.  Must be named like \"$source\"-config.json");
        o.setArgName("DIR_NAME");
        o.setRequired(false);
        options.addOption(o);
    }
    {
        // FIX: corrected "Quroum" typo and the unbalanced parenthesis in the description
        Option o = new Option("z", "zk", true, "Zookeeper Quorum URL (zk1:2181,zk2:2181,...)");
        o.setArgName("ZK_QUORUM");
        o.setRequired(true);
        options.addOption(o);
    }

    try {
        // NOTE(review): PosixParser is deprecated in commons-cli 1.3+; kept to preserve
        // the exact token-bursting behavior -- consider DefaultParser on upgrade.
        CommandLineParser parser = new PosixParser();
        CommandLine cmd = null;
        try {
            cmd = parser.parse(options, args);
        } catch (ParseException pe) {
            pe.printStackTrace();
            final HelpFormatter usageFormatter = new HelpFormatter();
            usageFormatter.printHelp("SourceConfigUtils", null, options, null, true);
            System.exit(-1);
        }
        if (cmd.hasOption("h")) {
            final HelpFormatter usageFormatter = new HelpFormatter();
            usageFormatter.printHelp("SourceConfigUtils", null, options, null, true);
            System.exit(0);
        }

        String zkQuorum = cmd.getOptionValue("z");
        if (cmd.hasOption("p")) {
            String sourcePath = cmd.getOptionValue("p");
            File root = new File(sourcePath);

            if (root.isDirectory()) {
                // FIX: File.listFiles() can return null (e.g. on an I/O error) even when
                // isDirectory() was true; guard against the resulting NPE.
                File[] children = root.listFiles();
                if (children != null) {
                    // each file "$source"-config.json is written under the key "$source"
                    for (File child : children) {
                        writeToZookeeperFromFile(child.getName().replaceFirst("-config.json", ""),
                                child.getPath(), zkQuorum);
                    }
                }
            }
        }

        SourceConfigUtils.dumpConfigs(zkQuorum);

    } catch (Exception e) {
        e.printStackTrace();
        System.exit(-1);
    }

}

From source file:org.apache.pig.builtin.PigStorage.java

/**
 * Builds the set of options this storage function recognizes.
 *
 * @return the valid options for PigStorage
 */
private Options populateValidOptions() {
    // Flag-style options take no argument value; Options.addOption chains.
    Options validOptions = new Options()
            .addOption("schema", false, "Loads / Stores the schema of the relation using a hidden JSON file.")
            .addOption("noschema", false, "Disable attempting to load data schema from the filesystem.")
            .addOption(TAG_SOURCE_FILE, false, "Appends input source file name to beginning of each tuple.")
            .addOption(TAG_SOURCE_PATH, false, "Appends input source file path to beginning of each tuple.")
            .addOption("tagsource", false, "Appends input source file name to beginning of each tuple.");

    // "overwrite" optionally accepts a single value and displays it as <overwrite>
    Option overwriteOption = new Option("overwrite", "Overwrites the destination.");
    overwriteOption.setLongOpt("overwrite");
    overwriteOption.setOptionalArg(true);
    overwriteOption.setArgs(1);
    overwriteOption.setArgName("overwrite");
    validOptions.addOption(overwriteOption);

    return validOptions;
}

From source file:org.apache.pirk.querier.wideskies.QuerierCLI.java

/**
 * Create the options available for the DistributedTestDriver
 * /*from ww  w  .j a  v a  2  s  . co  m*/
 * @return Apache's CLI Options object
 */
private Options createOptions() {
    Options options = new Options();

    // help
    Option optionHelp = new Option("h", "help", false,
            "Print out the help documentation for this command line execution");
    optionHelp.setRequired(false);
    options.addOption(optionHelp);

    // local.querier.properties
    Option optionLocalPropFile = new Option("localPropFile", LOCALPROPFILE, true,
            "Optional local properties file");
    optionLocalPropFile.setRequired(false);
    optionLocalPropFile.setArgName(LOCALPROPFILE);
    optionLocalPropFile.setType(String.class);
    options.addOption(optionLocalPropFile);

    // ACTION
    Option optionACTION = new Option("a", QuerierProps.ACTION, true,
            "required - 'encrypt' or 'decrypt' -- The action performed by the QuerierDriver");
    optionACTION.setRequired(false);
    optionACTION.setArgName(QuerierProps.ACTION);
    optionACTION.setType(String.class);
    options.addOption(optionACTION);

    // INPUTFILE
    Option optionINPUTFILE = new Option("i", QuerierProps.INPUTFILE, true,
            "required - Fully qualified file containing input "
                    + "-- \n The input is either: \n (1) For Encryption: A query file - Contains the query selectors, one per line; "
                    + "the first line must be the query number \n OR \n (2) For Decryption: A response file - Contains the serialized Response object");
    optionINPUTFILE.setRequired(false);
    optionINPUTFILE.setArgName(QuerierProps.INPUTFILE);
    optionINPUTFILE.setType(String.class);
    options.addOption(optionINPUTFILE);

    // OUTPUTFILE
    Option optionOUTPUTFILE = new Option("o", QuerierProps.OUTPUTFILE, true,
            "required - Fully qualified file for the result output. "
                    + "\n The output file specifies either: \n (1) For encryption: \n \t (a) A file to contain the serialized Querier object named: "
                    + "<outputFile>-" + QuerierConst.QUERIER_FILETAG + "  AND \n \t "
                    + "(b) A file to contain the serialized Query object named: <outputFile>-"
                    + QuerierConst.QUERY_FILETAG + "\n "
                    + "OR \n (2) A file to contain the decryption results where each line is where each line "
                    + "corresponds to one hit and is a JSON object with the schema QuerySchema");
    optionOUTPUTFILE.setRequired(false);
    optionOUTPUTFILE.setArgName(QuerierProps.OUTPUTFILE);
    optionOUTPUTFILE.setType(String.class);
    options.addOption(optionOUTPUTFILE);

    // NUMTHREADS
    Option optionNUMTHREADS = new Option("nt", QuerierProps.NUMTHREADS, true,
            "required -- Number of threads to use for encryption/decryption");
    optionNUMTHREADS.setRequired(false);
    optionNUMTHREADS.setArgName(QuerierProps.NUMTHREADS);
    optionNUMTHREADS.setType(String.class);
    options.addOption(optionNUMTHREADS);

    // DATASCHEMAS
    Option optionDataSchemas = new Option("ds", QuerierProps.DATASCHEMAS, true,
            "optional -- Comma separated list of data schema file names");
    optionDataSchemas.setRequired(false);
    optionDataSchemas.setArgName(QuerierProps.DATASCHEMAS);
    optionDataSchemas.setType(String.class);
    options.addOption(optionDataSchemas);

    // QUERYSCHEMAS
    Option optionQuerySchemas = new Option("qs", QuerierProps.QUERYSCHEMAS, true,
            "optional -- Comma separated list of query schema file names");
    optionQuerySchemas.setRequired(false);
    optionQuerySchemas.setArgName(QuerierProps.QUERYSCHEMAS);
    optionQuerySchemas.setType(String.class);
    options.addOption(optionQuerySchemas);

    // TYPE
    Option optionTYPE = new Option("qt", QuerierProps.QUERYTYPE, true,
            "required for encryption -- Type of the query as defined "
                    + "in the 'schemaName' tag of the corresponding query schema file");
    optionTYPE.setRequired(false);
    optionTYPE.setArgName(QuerierProps.QUERYTYPE);
    optionTYPE.setType(String.class);
    options.addOption(optionTYPE);

    // HASHBITSIZE
    Option optionHASHBITSIZE = new Option("hb", QuerierProps.HASHBITSIZE, true,
            "required -- Bit size of keyed hash");
    optionHASHBITSIZE.setRequired(false);
    optionHASHBITSIZE.setArgName(QuerierProps.HASHBITSIZE);
    optionHASHBITSIZE.setType(String.class);
    options.addOption(optionHASHBITSIZE);

    // HASHKEY
    Option optionHASHKEY = new Option("hk", QuerierProps.HASHKEY, true,
            "required for encryption -- String key for the keyed hash functionality");
    optionHASHKEY.setRequired(false);
    optionHASHKEY.setArgName(QuerierProps.HASHKEY);
    optionHASHKEY.setType(String.class);
    options.addOption(optionHASHKEY);

    // DATAPARTITIONSIZE
    Option optionDATAPARTITIONSIZE = new Option("dps", QuerierProps.DATAPARTITIONSIZE, true,
            "required for encryption -- Partition bit size in data partitioning");
    optionDATAPARTITIONSIZE.setRequired(false);
    optionDATAPARTITIONSIZE.setArgName(QuerierProps.DATAPARTITIONSIZE);
    optionDATAPARTITIONSIZE.setType(String.class);
    options.addOption(optionDATAPARTITIONSIZE);

    // PAILLIERBITSIZE
    Option optionPAILLIERBITSIZE = new Option("pbs", QuerierProps.PAILLIERBITSIZE, true,
            "required for encryption -- Paillier modulus size N");
    optionPAILLIERBITSIZE.setRequired(false);
    optionPAILLIERBITSIZE.setArgName(QuerierProps.PAILLIERBITSIZE);
    optionPAILLIERBITSIZE.setType(String.class);
    options.addOption(optionPAILLIERBITSIZE);

    // CERTAINTY
    Option optionCERTAINTY = new Option("c", QuerierProps.CERTAINTY, true,
            "required for encryption -- Certainty of prime generation for Paillier -- must  be greater than or "
                    + "equal to " + SystemConfiguration.getProperty("pir.primeCertainty") + "");
    optionCERTAINTY.setRequired(false);
    optionCERTAINTY.setArgName(QuerierProps.CERTAINTY);
    optionCERTAINTY.setType(String.class);
    options.addOption(optionCERTAINTY);

    // BITSET
    Option optionBITSET = new Option("b", QuerierProps.BITSET, true,
            "required for encryption -- Ensure that this bit position is set in the "
                    + "Paillier modulus (will generate Paillier moduli until finding one in which this bit is set)");
    optionBITSET.setRequired(false);
    optionBITSET.setArgName(QuerierProps.BITSET);
    optionBITSET.setType(String.class);
    options.addOption(optionBITSET);

    // embedSelector
    Option optionEmbedSelector = new Option("embed", QuerierProps.EMBEDSELECTOR, true,
            "required for encryption -- 'true' or 'false' - Whether or not to embed "
                    + "the selector in the results to reduce false positives");
    optionEmbedSelector.setRequired(false);
    optionEmbedSelector.setArgName(QuerierProps.EMBEDSELECTOR);
    optionEmbedSelector.setType(String.class);
    options.addOption(optionEmbedSelector);

    // useMemLookupTable
    Option optionUseMemLookupTable = new Option("mlu", QuerierProps.USEMEMLOOKUPTABLE, true,
            "required for encryption -- 'true' or 'false' - Whether or not to generate and use "
                    + "an in memory modular exponentation lookup table - only for standalone/testing right now...");
    optionUseMemLookupTable.setRequired(false);
    optionUseMemLookupTable.setArgName(QuerierProps.USEMEMLOOKUPTABLE);
    optionUseMemLookupTable.setType(String.class);
    options.addOption(optionUseMemLookupTable);

    // useHDFSLookupTable
    Option optionUseHDFSLookupTable = new Option("lu", QuerierProps.USEHDFSLOOKUPTABLE, true,
            "required for encryption -- 'true' or 'false' -- Whether or not to generate and use "
                    + "a hdfs modular exponentation lookup table");
    optionUseHDFSLookupTable.setRequired(false);
    optionUseHDFSLookupTable.setArgName(QuerierProps.USEHDFSLOOKUPTABLE);
    optionUseHDFSLookupTable.setType(String.class);
    options.addOption(optionUseHDFSLookupTable);

    // QUERIERFILE
    Option optionQUERIERFILE = new Option("qf", QuerierProps.QUERIERFILE, true,
            "required for decryption -- Fully qualified file containing the serialized Querier object");
    optionQUERIERFILE.setRequired(false);
    optionQUERIERFILE.setArgName(QuerierProps.QUERIERFILE);
    optionQUERIERFILE.setType(String.class);
    options.addOption(optionQUERIERFILE);

    // embedQuerySchema
    Option optionEMBEDQUERYSCHEMA = new Option("embedQS", QuerierProps.EMBEDQUERYSCHEMA, true,
            "optional (defaults to false) -- Whether or not to embed the QuerySchema in the Query (via QueryInfo)");
    optionEMBEDQUERYSCHEMA.setRequired(false);
    optionEMBEDQUERYSCHEMA.setArgName(QuerierProps.EMBEDQUERYSCHEMA);
    optionEMBEDQUERYSCHEMA.setType(String.class);
    options.addOption(optionEMBEDQUERYSCHEMA);

    // SR_ALGORITHM
    Option optionSR_ALGORITHM = new Option("srAlg", QuerierProps.SR_ALGORITHM, true,
            "optional - specify the SecureRandom algorithm, defaults to NativePRNG");
    optionSR_ALGORITHM.setRequired(false);
    optionSR_ALGORITHM.setArgName(QuerierProps.SR_ALGORITHM);
    optionSR_ALGORITHM.setType(String.class);
    options.addOption(optionSR_ALGORITHM);

    // SR_PROVIDERS
    Option optionSR_PROVIDER = new Option("srProvider", QuerierProps.SR_PROVIDER, true,
            "optional - specify the SecureRandom provider, defaults to SUN");
    optionSR_PROVIDER.setRequired(false);
    optionSR_PROVIDER.setArgName(QuerierProps.SR_PROVIDER);
    optionSR_PROVIDER.setType(String.class);
    options.addOption(optionSR_PROVIDER);

    return options;
}

From source file:org.apache.pirk.responder.wideskies.ResponderCLI.java

/**
 * Create the options available for the DistributedTestDriver
 *
 * @return Apache's CLI Options object/*  ww  w . j  a  va  2s .c o  m*/
 */
private Options createOptions() {
    Options options = new Options();

    // help
    Option optionHelp = new Option("h", "help", false,
            "Print out the help documentation for this command line execution");
    optionHelp.setRequired(false);
    options.addOption(optionHelp);

    // local.querier.properties
    Option optionLocalPropFile = new Option("localPropFile", LOCALPROPFILE, true,
            "Optional local properties file");
    optionLocalPropFile.setRequired(false);
    optionLocalPropFile.setArgName(LOCALPROPFILE);
    optionLocalPropFile.setType(String.class);
    options.addOption(optionLocalPropFile);

    // platform
    Option optionPlatform = new Option("p", ResponderProps.PLATFORM, true,
            "required -- 'mapreduce', 'spark', or 'standalone' : Processing platform technology for the responder");
    optionPlatform.setRequired(false);
    optionPlatform.setArgName(ResponderProps.PLATFORM);
    optionPlatform.setType(String.class);
    options.addOption(optionPlatform);

    // queryInput
    Option optionQueryInput = new Option("q", ResponderProps.QUERYINPUT, true,
            "required -- Fully qualified dir in hdfs of Query files");
    optionQueryInput.setRequired(false);
    optionQueryInput.setArgName(ResponderProps.QUERYINPUT);
    optionQueryInput.setType(String.class);
    options.addOption(optionQueryInput);

    // dataInputFormat
    Option optionDataInputFormat = new Option("d", ResponderProps.DATAINPUTFORMAT, true,
            "required -- 'base', 'elasticsearch', or 'standalone' : Specify the input format");
    optionDataInputFormat.setRequired(false);
    optionDataInputFormat.setArgName(ResponderProps.DATAINPUTFORMAT);
    optionDataInputFormat.setType(String.class);
    options.addOption(optionDataInputFormat);

    // inputData
    Option optionInputData = new Option("i", ResponderProps.INPUTDATA, true,
            "required -- Fully qualified name of input file/directory in hdfs; used if inputFormat = 'base'");
    optionInputData.setRequired(false);
    optionInputData.setArgName(ResponderProps.INPUTDATA);
    optionInputData.setType(String.class);
    options.addOption(optionInputData);

    // baseInputFormat
    Option optionBaseInputFormat = new Option("bif", ResponderProps.BASEINPUTFORMAT, true,
            "required if baseInputFormat = 'base' -- Full class name of the InputFormat to use when reading in the data - must extend BaseInputFormat");
    optionBaseInputFormat.setRequired(false);
    optionBaseInputFormat.setArgName(ResponderProps.BASEINPUTFORMAT);
    optionBaseInputFormat.setType(String.class);
    options.addOption(optionBaseInputFormat);

    // baseQuery
    Option optionBaseQuery = new Option("j", ResponderProps.BASEQUERY, true,
            "optional -- ElasticSearch-like query if using 'base' input format - used to filter records in the RecordReader");
    optionBaseQuery.setRequired(false);
    optionBaseQuery.setArgName(ResponderProps.BASEQUERY);
    optionBaseQuery.setType(String.class);
    options.addOption(optionBaseQuery);

    // esResource
    Option optionEsResource = new Option("er", ResponderProps.ESRESOURCE, true,
            "required if baseInputFormat = 'elasticsearch' -- Requires the format <index>/<type> : Elasticsearch resource where data is read and written to");
    optionEsResource.setRequired(false);
    optionEsResource.setArgName(ResponderProps.ESRESOURCE);
    optionEsResource.setType(String.class);
    options.addOption(optionEsResource);

    // esQuery
    Option optionEsQuery = new Option("eq", ResponderProps.ESQUERY, true,
            "required if baseInputFormat = 'elasticsearch' -- ElasticSearch query if using 'elasticsearch' input format");
    optionEsQuery.setRequired(false);
    optionEsQuery.setArgName(ResponderProps.ESQUERY);
    optionEsQuery.setType(String.class);
    options.addOption(optionEsQuery);

    // esNodes
    Option optionEsNodes = new Option("en", ResponderProps.ESNODES, true,
            "required if baseInputFormat = 'elasticsearch' -- ElasticSearch node in the cluster");
    optionEsNodes.setRequired(false);
    optionEsNodes.setArgName(ResponderProps.ESNODES);
    optionEsNodes.setType(String.class);
    options.addOption(optionEsNodes);

    // esPort
    Option optionEsPort = new Option("ep", ResponderProps.ESPORT, true,
            "required if baseInputFormat = 'elasticsearch' -- ElasticSearch cluster port");
    optionEsPort.setRequired(false);
    optionEsPort.setArgName(ResponderProps.ESPORT);
    optionEsPort.setType(String.class);
    options.addOption(optionEsPort);

    // outputFile
    Option optionOutputFile = new Option("o", ResponderProps.OUTPUTFILE, true,
            "required -- Fully qualified name of output file in hdfs");
    optionOutputFile.setRequired(false);
    optionOutputFile.setArgName(ResponderProps.OUTPUTFILE);
    optionOutputFile.setType(String.class);
    options.addOption(optionOutputFile);

    // stopListFile
    Option optionStopListFile = new Option("sf", ResponderProps.STOPLISTFILE, true,
            "optional (unless using StopListFilter) -- Fully qualified file in hdfs containing stoplist terms; used by the StopListFilter");
    optionStopListFile.setRequired(false);
    optionStopListFile.setArgName(ResponderProps.STOPLISTFILE);
    optionStopListFile.setType(String.class);
    options.addOption(optionStopListFile);

    // numReduceTasks
    Option optionNumReduceTasks = new Option("nr", ResponderProps.NUMREDUCETASKS, true,
            "optional -- Number of reduce tasks");
    optionNumReduceTasks.setRequired(false);
    optionNumReduceTasks.setArgName(ResponderProps.NUMREDUCETASKS);
    optionNumReduceTasks.setType(String.class);
    options.addOption(optionNumReduceTasks);

    // useLocalCache
    Option optionUseLocalCache = new Option("ulc", ResponderProps.USELOCALCACHE, true,
            "optional -- 'true' or 'false : Whether or not to use the local cache for modular exponentiation; Default is 'true'");
    optionUseLocalCache.setRequired(false);
    optionUseLocalCache.setArgName(ResponderProps.USELOCALCACHE);
    optionUseLocalCache.setType(String.class);
    options.addOption(optionUseLocalCache);

    // limitHitsPerSelector
    Option optionLimitHitsPerSelector = new Option("lh", ResponderProps.LIMITHITSPERSELECTOR, true,
            "optional -- 'true' or 'false : Whether or not to limit the number of hits per selector; Default is 'true'");
    optionLimitHitsPerSelector.setRequired(false);
    optionLimitHitsPerSelector.setArgName(ResponderProps.LIMITHITSPERSELECTOR);
    optionLimitHitsPerSelector.setType(String.class);
    options.addOption(optionLimitHitsPerSelector);

    // maxHitsPerSelector
    Option optionMaxHitsPerSelector = new Option("mh", ResponderProps.MAXHITSPERSELECTOR, true,
            "optional -- Max number of hits encrypted per selector");
    optionMaxHitsPerSelector.setRequired(false);
    optionMaxHitsPerSelector.setArgName(ResponderProps.MAXHITSPERSELECTOR);
    optionMaxHitsPerSelector.setType(String.class);
    options.addOption(optionMaxHitsPerSelector);

    // mapreduce.map.memory.mb
    Option optionMapMemory = new Option("mm", ResponderProps.MAPMEMORY, true,
            "optional -- Amount of memory (in MB) to allocate per map task; Default is 3000");
    optionMapMemory.setRequired(false);
    optionMapMemory.setArgName(ResponderProps.MAPMEMORY);
    optionMapMemory.setType(String.class);
    options.addOption(optionMapMemory);

    // mapreduce.reduce.memory.mb
    Option optionReduceMemory = new Option("rm", ResponderProps.REDUCEMEMORY, true,
            "optional -- Amount of memory (in MB) to allocate per reduce task; Default is 3000");
    optionReduceMemory.setRequired(false);
    optionReduceMemory.setArgName(ResponderProps.REDUCEMEMORY);
    optionReduceMemory.setType(String.class);
    options.addOption(optionReduceMemory);

    // mapreduce.map.java.opts
    Option optionMapOpts = new Option("mjo", ResponderProps.MAPJAVAOPTS, true,
            "optional -- Amount of heap (in MB) to allocate per map task; Default is -Xmx2800m");
    optionMapOpts.setRequired(false);
    optionMapOpts.setArgName(ResponderProps.MAPJAVAOPTS);
    optionMapOpts.setType(String.class);
    options.addOption(optionMapOpts);

    // mapreduce.reduce.java.opts
    Option optionReduceOpts = new Option("rjo", ResponderProps.REDUCEJAVAOPTS, true,
            "optional -- Amount of heap (in MB) to allocate per reduce task; Default is -Xmx2800m");
    optionReduceOpts.setRequired(false);
    optionReduceOpts.setArgName(ResponderProps.REDUCEJAVAOPTS);
    optionReduceOpts.setType(String.class);
    options.addOption(optionReduceOpts);

    // data.schemas
    Option optionDataSchemas = new Option("ds", ResponderProps.DATASCHEMAS, true,
            "required -- Comma separated list of data schema file names");
    optionDataSchemas.setRequired(false);
    optionDataSchemas.setArgName(ResponderProps.DATASCHEMAS);
    optionDataSchemas.setType(String.class);
    options.addOption(optionDataSchemas);

    // query.schemas
    Option optionQuerySchemas = new Option("qs", ResponderProps.QUERYSCHEMAS, true,
            "required -- Comma separated list of query schema file names");
    optionQuerySchemas.setRequired(false);
    optionQuerySchemas.setArgName(ResponderProps.QUERYSCHEMAS);
    optionQuerySchemas.setType(String.class);
    options.addOption(optionQuerySchemas);

    // pir.numExpLookupPartitions
    Option optionExpParts = new Option("expParts", ResponderProps.NUMEXPLOOKUPPARTS, true,
            "optional -- Number of partitions for the exp lookup table");
    optionExpParts.setRequired(false);
    optionExpParts.setArgName(ResponderProps.NUMEXPLOOKUPPARTS);
    optionExpParts.setType(String.class);
    options.addOption(optionExpParts);

    // pir.numExpLookupPartitions
    Option optionHdfsExp = new Option("hdfsExp", ResponderProps.USEHDFSLOOKUPTABLE, true,
            "optional -- 'true' or 'false' - Whether or not to generate and use the hdfs lookup table"
                    + " for modular exponentiation");
    optionHdfsExp.setRequired(false);
    optionHdfsExp.setArgName(ResponderProps.USEHDFSLOOKUPTABLE);
    optionHdfsExp.setType(String.class);
    options.addOption(optionHdfsExp);

    // numDataPartitions
    Option optionDataParts = new Option("dataParts", ResponderProps.NUMDATAPARTITIONS, true,
            "optional -- Number of partitions for the input data");
    optionDataParts.setRequired(false);
    optionDataParts.setArgName(ResponderProps.NUMDATAPARTITIONS);
    optionDataParts.setType(String.class);
    options.addOption(optionDataParts);

    // useModExpJoin
    Option optionModExpJoin = new Option("useModExpJoin", ResponderProps.USEMODEXPJOIN, true,
            "optional -- 'true' or 'false' -- Spark only -- Whether or not to "
                    + "pre-compute the modular exponentiation table and join it to the data partitions when performing the encrypted row calculations");
    optionModExpJoin.setRequired(false);
    optionModExpJoin.setArgName(ResponderProps.USEMODEXPJOIN);
    optionModExpJoin.setType(String.class);
    options.addOption(optionModExpJoin);

    // numColMultPartitions
    Option optionNumColMultPartitions = new Option("numColMultParts", ResponderProps.NUMCOLMULTPARTITIONS, true,
            "optional, Spark only -- Number of partitions to " + "use when performing column multiplication");
    optionNumColMultPartitions.setRequired(false);
    optionNumColMultPartitions.setArgName(ResponderProps.NUMCOLMULTPARTITIONS);
    optionNumColMultPartitions.setType(String.class);
    options.addOption(optionNumColMultPartitions);

    // colMultReduceByKey
    Option optionColMultReduceByKey = new Option("colMultRBK", ResponderProps.COLMULTREDUCEBYKEY, true,
            "optional -- 'true' or 'false' -- Spark only -- "
                    + "If true, uses reduceByKey in performing column multiplication; if false, uses groupByKey -> reduce");
    optionColMultReduceByKey.setRequired(false);
    optionColMultReduceByKey.setArgName(ResponderProps.COLMULTREDUCEBYKEY);
    optionColMultReduceByKey.setType(String.class);
    options.addOption(optionColMultReduceByKey);

    // allowEmbeddedQS
    Option optionAllowEmbeddedQS = new Option("allowEmbeddedQS", ResponderProps.ALLOWEMBEDDEDQUERYSCHEMAS, true,
            "optional -- 'true' or 'false'  (defaults to 'false') -- "
                    + "If true, allows embedded QuerySchemas for a query.");
    optionAllowEmbeddedQS.setRequired(false);
    optionAllowEmbeddedQS.setArgName(ResponderProps.ALLOWEMBEDDEDQUERYSCHEMAS);
    optionAllowEmbeddedQS.setType(String.class);
    options.addOption(optionAllowEmbeddedQS);

    // batchSeconds - spark streaming
    Option optionBatchSeconds = new Option("batchSeconds", ResponderProps.BATCHSECONDS, true,
            "optional -- Number of seconds per batch in Spark Streaming; defaults to 30");
    optionBatchSeconds.setRequired(false);
    optionBatchSeconds.setArgName(ResponderProps.BATCHSECONDS);
    optionBatchSeconds.setType(String.class);
    options.addOption(optionBatchSeconds);

    // windowLength - spark streaming
    Option optionWindowLength = new Option("windowLength", ResponderProps.WINDOWLENGTH, true,
            "optional -- Number of seconds per window in Spark Streaming; defaults to 60");
    optionWindowLength.setRequired(false);
    optionWindowLength.setArgName(ResponderProps.WINDOWLENGTH);
    optionWindowLength.setType(String.class);
    options.addOption(optionWindowLength);

    // maxBatches - spark streaming
    Option optionMaxBatches = new Option("maxBatches", ResponderProps.MAXBATCHES, true,
            "optional -- Max batches to process in Spark Streaming; defaults to -1 - unlimited");
    optionMaxBatches.setRequired(false);
    optionMaxBatches.setArgName(ResponderProps.MAXBATCHES);
    optionMaxBatches.setType(String.class);
    options.addOption(optionMaxBatches);

    // stopGracefully - spark streaming
    Option optionStopGracefully = new Option("stopGracefully", ResponderProps.STOPGRACEFULLY, true,
            "optional -- Whether or not to stop gracefully in Spark Streaming; defaults to false");
    optionStopGracefully.setRequired(false);
    optionStopGracefully.setArgName(ResponderProps.STOPGRACEFULLY);
    optionStopGracefully.setType(String.class);
    options.addOption(optionStopGracefully);

    // useQueueStream - spark streaming
    Option optionUseQueueStream = new Option("queueStream", ResponderProps.USEQUEUESTREAM, true,
            "optional -- Whether or not to use a queue stream in Spark Streaming; defaults to false");
    optionUseQueueStream.setRequired(false);
    optionUseQueueStream.setArgName(ResponderProps.USEQUEUESTREAM);
    optionUseQueueStream.setType(String.class);
    options.addOption(optionUseQueueStream);

    return options;
}

From source file:org.apache.pirk.test.distributed.DistributedTestCLI.java

/**
 * Create the options available for the DistributedTestDriver
 * /*w ww  .  j ava2 s. c o m*/
 * @return Apache's CLI Options object
 */
private Options createOptions() {
    Options options = new Options();

    // help
    Option optionHelp = new Option("h", "help", false,
            "Print out the help documentation for this command line execution");
    optionHelp.setRequired(false);
    options.addOption(optionHelp);

    // jar file
    Option optionJar = new Option("j", "jar", true, "required -- Fully qualified jar file");
    optionJar.setRequired(false);
    options.addOption(optionJar);

    // test selection
    String tests = "testNum = 1: Wideskies Tests\n";
    tests += "Subtests:\n";
    tests += "E - Elasticsearch MapReduce\n";
    tests += "J - JSON/HDFS MapReduce\n";
    tests += "ES - Elasticsearch Spark \n";
    tests += "JS - JSON/HDFS Spark \n";
    tests += "SS - Spark Streaming Tests \n";
    tests += "JSS - JSON/HDFS Spark Streaming \n";
    tests += "ESS - Elasticsearch Spark Streaming \n";

    Option optionTestSelection = new Option("t", "tests", true,
            "optional -- Select which tests to execute: \n" + tests);
    optionTestSelection.setRequired(false);
    optionTestSelection.setArgName("<testNum>:<subtestDesignator>");
    optionTestSelection.setType(String.class);
    options.addOption(optionTestSelection);

    return options;
}

From source file:org.apache.pluto.util.cli.AssemblerCLI.java

/**
 * Creates the assembler CLI, recording the raw arguments and defining the accepted options.
 *
 * @param args the raw command line arguments
 */
public AssemblerCLI(String[] args) {
    this.args = args;
    options = new Options();

    // -d/--destination takes the target webapp path as its argument (shown as <file>)
    Option destinationOption = new Option("d", "destination", true,
            "specify where the resulting webapp should be written ");
    destinationOption.setArgName("file");
    options.addOption(destinationOption);

    // -debug is a plain flag with no argument
    options.addOption(new Option("debug", false, "print debug information."));
}

From source file:org.apache.stratos.adc.mgt.cli.commands.SubscribeCommand.java

/**
 * Construct the options accepted by the subscribe command.
 *
 * @return Options expected from command-line.
 */
private Options constructOptions() {
    final Options options = new Options();

    // auto-scaling policy name
    Option policy = new Option(CliConstants.POLICY_OPTION, CliConstants.POLICY_LONG_OPTION, true,
            "Auto-scaling policy.\nPlease use \"" + CliConstants.POLICIES_ACTION
                    + "\" command to view the available policies.");
    policy.setArgName("policy name");
    options.addOption(policy);

    // data cartridge to connect to
    Option connect = new Option(CliConstants.CONNECT_OPTION, CliConstants.CONNECT_LONG_OPTION, true,
            "Data cartridge type");
    connect.setArgName("data cartridge type");
    options.addOption(connect);

    // alias for the data cartridge
    Option alias = new Option(CliConstants.DATA_ALIAS_OPTION, CliConstants.DATA_ALIAS_LONG_OPTION, true,
            "Data cartridge alias");
    alias.setArgName("alias");
    options.addOption(alias);

    // GIT repository settings
    Option repoUrl = new Option(CliConstants.REPO_URL_OPTION, CliConstants.REPO_URL_LONG_OPTION, true,
            "GIT repository URL");
    repoUrl.setArgName("url");
    options.addOption(repoUrl);

    // flag marking the repository as private (no argument)
    options.addOption(CliConstants.PRIVATE_REPO_OPTION, CliConstants.PRIVATE_REPO_LONG_OPTION, false,
            "Private repository");

    Option user = new Option(CliConstants.USERNAME_OPTION, CliConstants.USERNAME_LONG_OPTION, true,
            "GIT repository username");
    user.setArgName("username");
    options.addOption(user);

    // the password value is optional on the command line so it can be supplied interactively
    Option password = new Option(CliConstants.PASSWORD_OPTION, CliConstants.PASSWORD_LONG_OPTION, true,
            "GIT repository password");
    password.setArgName("password");
    password.setOptionalArg(true);
    options.addOption(password);

    return options;
}

From source file:org.apache.stratos.adc.mgt.cli.StratosApplication.java

/**
 * Construct Options./*from   w  ww.j a v a  2 s.c o m*/
 * 
 * @return Options expected from command-line.
 */
private Options constructOptions() {
    final Options options = new Options();
    Option usernameOption = new Option(CliConstants.USERNAME_OPTION, CliConstants.USERNAME_LONG_OPTION, true,
            "Username");
    usernameOption.setArgName("username");
    options.addOption(usernameOption);

    Option passwordOption = new Option(CliConstants.PASSWORD_OPTION, CliConstants.PASSWORD_LONG_OPTION, true,
            "Password");
    passwordOption.setArgName("password");
    passwordOption.setOptionalArg(true);
    options.addOption(passwordOption);
    options.addOption(CliConstants.HELP_OPTION, CliConstants.HELP_LONG_OPTION, false, "Display this help");
    options.addOption(CliConstants.TRACE_OPTION, false, "Enable trace logging");
    options.addOption(CliConstants.DEBUG_OPTION, false, "Enable debug logging");
    return options;
}

From source file:org.apache.stratos.cli.commands.AddApplicationPolicyCommand.java

/**
 * Construct the options accepted by the add-application-policy command.
 *
 * @return Options expected from command-line.
 */
private Options constructOptions() {
    final Options options = new Options();

    // path to the application policy definition resource
    Option resourcePathOption = new Option(CliConstants.RESOURCE_PATH, CliConstants.RESOURCE_PATH_LONG_OPTION,
            true, "Application policy resource path");
    resourcePathOption.setArgName("resource path");
    options.addOption(resourcePathOption);

    return options;
}