Example usage for org.apache.commons.cli Option setRequired

List of usage examples for org.apache.commons.cli Option setRequired

Introduction

On this page you can find usage examples for org.apache.commons.cli.Option#setRequired, drawn from open-source projects.

Prototype

public void setRequired(boolean required) 

Document

Sets whether this Option is mandatory.
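
Before the project examples, here is a minimal, self-contained sketch of the behavior (the class and option names are illustrative, not taken from the projects below): an option marked with setRequired(true) causes GnuParser.parse to throw a MissingOptionException when that option is absent from the arguments.

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.MissingOptionException;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

public class SetRequiredDemo {
    public static void main(String[] args) throws Exception {
        Options options = new Options();

        // Required option: parsing fails if -in is absent.
        Option input = new Option("in", true, "input path");
        input.setRequired(true);
        options.addOption(input);

        // Optional option: parsing succeeds without it (false is also the default).
        Option verbose = new Option("v", false, "verbose output");
        verbose.setRequired(false);
        options.addOption(verbose);

        try {
            CommandLine cmd = new GnuParser().parse(options, args);
            System.out.println("in = " + cmd.getOptionValue("in"));
        } catch (MissingOptionException e) {
            // getMissingOptions() lists the names of the absent required options.
            System.err.println("Missing required option(s): " + e.getMissingOptions());
        }
    }
}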

Usage

From source file:org.apache.falcon.replication.FeedReplicator.java

protected CommandLine getCommand(String[] args) throws ParseException {
    Options options = new Options();
    Option opt = new Option("maxMaps", true, "max number of maps to use for this copy");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("mapBandwidth", true, "bandwidth per map (in MB) to use for this copy");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("sourcePaths", true, "comma separtated list of source paths to be copied");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("targetPath", true, "target path");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("falconFeedStorageType", true, "feed storage type");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("availabilityFlag", true, "availability flag");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_OVERWRITE.getName(), true,
            "option to force overwrite");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_IGNORE_ERRORS.getName(), true, "abort on error");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_SKIP_CHECKSUM.getName(), true, "skip checksums");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_REMOVE_DELETED_FILES.getName(), true,
            "remove deleted files - should there be files in the target directory that"
                    + "were removed from the source directory");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_BLOCK_SIZE.getName(), true,
            "preserve block size");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_REPLICATION_NUMBER.getName(), true,
            "preserve replication count");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(ReplicationDistCpOption.DISTCP_OPTION_PRESERVE_PERMISSIONS.getName(), true,
            "preserve permissions");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option("counterLogDir", true, "log directory to store job counter file");
    opt.setRequired(false);
    options.addOption(opt);

    return new GnuParser().parse(options, args);
}

From source file:org.apache.falcon.retention.FeedEvictor.java

private CommandLine getCommand(String[] args) throws org.apache.commons.cli.ParseException {
    Options options = new Options();

    Option opt = new Option("feedBasePath", true, "base path for feed, ex /data/feed/${YEAR}-${MONTH}");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("falconFeedStorageType", true, "feed storage type, FileSystem or Table");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("retentionType", true, "type of retention policy like delete, archive etc");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("retentionLimit", true, "time limit for retention, ex hours(5), months(2), days(90)");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("timeZone", true, "timezone for feed, ex UTC");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("frequency", true,
            "frequency of feed,  ex hourly, daily, monthly, minute, weekly, yearly");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option("logFile", true, "log file for capturing size of feed");
    opt.setRequired(true);
    options.addOption(opt);

    return new GnuParser().parse(options, args);
}

From source file:org.apache.falcon.snapshots.replication.HdfsSnapshotReplicator.java

protected CommandLine getCommand(String[] args) throws FalconException {
    Options options = new Options();

    Option opt = new Option(HdfsSnapshotMirrorProperties.DISTCP_MAX_MAPS.getName(), true,
            "max number of maps to use for distcp");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.MAP_BANDWIDTH_IN_MB.getName(), true,
            "Bandwidth in MB/s used by each mapper during replication");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_NN.getName(), true, "Source NN");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_EXEC_URL.getName(), true,
            "Replication instance job Exec Url");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_NN_KERBEROS_PRINCIPAL.getName(), true,
            "Replication instance job NN Kerberos Principal");
    opt.setRequired(false);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_SNAPSHOT_DIR.getName(), true,
            "Source snapshot-able dir to replicate");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_NN.getName(), true, "Target NN");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_EXEC_URL.getName(), true,
            "Replication instance target Exec Url");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_NN_KERBEROS_PRINCIPAL.getName(), true,
            "Replication instance target NN Kerberos Principal");
    opt.setRequired(false);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_SNAPSHOT_DIR.getName(), true,
            "Target snapshot-able dir to replicate");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.TDE_ENCRYPTION_ENABLED.getName(), true,
            "Is TDE encryption enabled on dirs being replicated?");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.SNAPSHOT_JOB_NAME.getName(), true,
            "Replication instance job name");
    opt.setRequired(true);
    options.addOption(opt);

    try {
        return new GnuParser().parse(options, args);
    } catch (ParseException pe) {
        LOG.info("Unabel to parse commad line arguments for HdfsSnapshotReplicator " + pe.getMessage());
        throw new FalconException(pe.getMessage());
    }
}

From source file:org.apache.falcon.snapshots.retention.HdfsSnapshotEvictor.java

private CommandLine getCommand(String[] args) throws org.apache.commons.cli.ParseException {
    Options options = new Options();

    Option opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_NN.getName(), true, "Source Cluster");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_EXEC_URL.getName(), true,
            "Replication instance job Exec Url");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_NN_KERBEROS_PRINCIPAL.getName(), true,
            "Replication instance job NN Kerberos Principal");
    opt.setRequired(false);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_SNAPSHOT_DIR.getName(), true,
            "Source snapshot-able dir to replicate");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_NN.getName(), true, "Target Cluster");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_SNAPSHOT_DIR.getName(), true,
            "Target snapshot-able dir to replicate");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_EXEC_URL.getName(), true,
            "Replication instance target Exec Url");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_NN_KERBEROS_PRINCIPAL.getName(), true,
            "Replication instance target NN Kerberos Principal");
    opt.setRequired(false);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.SNAPSHOT_JOB_NAME.getName(), true,
            "Replication instance job name");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_SNAPSHOT_RETENTION_POLICY.getName(), true,
            "Source retention policy");
    opt.setRequired(false);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_SNAPSHOT_RETENTION_AGE_LIMIT.getName(), true,
            "Source delete snapshots older than agelimit");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.SOURCE_SNAPSHOT_RETENTION_NUMBER.getName(), true,
            "Source number of snapshots to retain");
    opt.setRequired(true);
    options.addOption(opt);

    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_SNAPSHOT_RETENTION_POLICY.getName(), true,
            "Target retention policy");
    opt.setRequired(false);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_SNAPSHOT_RETENTION_AGE_LIMIT.getName(), true,
            "Target delete snapshots older than agelimit");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option(HdfsSnapshotMirrorProperties.TARGET_SNAPSHOT_RETENTION_NUMBER.getName(), true,
            "Target number of snapshots to retain");
    opt.setRequired(true);
    options.addOption(opt);

    return new GnuParser().parse(options, args);
}

From source file:org.apache.falcon.workflow.FalconPostProcessing.java

private static void addOption(Options options, Arg arg, boolean isRequired) {
    Option option = arg.getOption();
    option.setRequired(isRequired);
    options.addOption(option);
}
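
This helper centralizes the setRequired decision at registration time. As an illustration of the pattern only (the enum and constant names below are hypothetical, not Falcon's actual Arg values), a complete equivalent might look like this:

import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;

public class ArgEnumDemo {
    // Hypothetical enum mirroring the Arg pattern: each constant builds its own Option.
    enum DemoArg {
        ENTITY_NAME("entityName", "name of the entity"),
        RUN_ID("runId", "id of the workflow run");

        private final String name;
        private final String desc;

        DemoArg(String name, String desc) {
            this.name = name;
            this.desc = desc;
        }

        Option getOption() {
            return new Option(name, true, desc);
        }
    }

    private static void addOption(Options options, DemoArg arg, boolean isRequired) {
        Option option = arg.getOption();
        option.setRequired(isRequired);
        options.addOption(option);
    }

    public static void main(String[] args) throws ParseException {
        Options options = new Options();
        // Requiredness is decided once per argument, at registration time.
        addOption(options, DemoArg.ENTITY_NAME, true);
        addOption(options, DemoArg.RUN_ID, false);
        System.out.println(new GnuParser().parse(options, args).getOptionValue("entityName"));
    }
}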

From source file:org.apache.falcon.workflow.WorkflowExecutionContext.java

private static void addOption(Options options, WorkflowExecutionArgs arg, boolean isRequired) {
    Option option = arg.getOption();
    option.setRequired(isRequired);
    options.addOption(option);
}

From source file:org.apache.flink.runtime.taskmanager.TaskManager.java

/**
 * Entry point for the TaskManager executable.
 *
 * @param args Arguments from the command line
 * @throws IOException 
 */
@SuppressWarnings("static-access")
public static void main(String[] args) throws IOException {
    Option configDirOpt = OptionBuilder.withArgName("config directory").hasArg()
            .withDescription("Specify configuration directory.").create("configDir");
    // tempDir option is used by the YARN client.
    Option tempDir = OptionBuilder.withArgName("temporary directory (overwrites configured option)").hasArg()
            .withDescription("Specify temporary directory.").create(ARG_CONF_DIR);
    configDirOpt.setRequired(true);
    tempDir.setRequired(false);
    Options options = new Options();
    options.addOption(configDirOpt);
    options.addOption(tempDir);

    CommandLineParser parser = new GnuParser();
    CommandLine line = null;
    try {
        line = parser.parse(options, args);
    } catch (ParseException e) {
        System.err.println("CLI Parsing failed. Reason: " + e.getMessage());
        System.exit(STARTUP_FAILURE_RETURN_CODE);
    }

    String configDir = line.getOptionValue(configDirOpt.getOpt(), null);
    String tempDirVal = line.getOptionValue(tempDir.getOpt(), null);

    // First, try to load global configuration
    GlobalConfiguration.loadConfiguration(configDir);
    if (tempDirVal != null // the YARN TM runner has set a value for the temp dir
            // the configuration does not contain a temp directory
            && GlobalConfiguration.getString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, null) == null) {
        Configuration c = GlobalConfiguration.getConfiguration();
        c.setString(ConfigConstants.TASK_MANAGER_TMP_DIR_KEY, tempDirVal);
        LOG.info("Setting temporary directory to " + tempDirVal);
        GlobalConfiguration.includeConfiguration(c);
    }

    // print some startup environment info, like user, code revision, etc
    EnvironmentInformation.logEnvironmentInfo(LOG, "TaskManager");

    // Create a new task manager object
    try {
        createTaskManager(ExecutionMode.CLUSTER);
    } catch (Throwable t) {
        LOG.error("Taskmanager startup failed: " + t.getMessage(), t);
        System.exit(STARTUP_FAILURE_RETURN_CODE);
    }

    // park the main thread to keep the JVM alive (all other threads may be daemon threads)
    Object mon = new Object();
    synchronized (mon) {
        try {
            mon.wait();
        } catch (InterruptedException ex) {
        }
    }
}

From source file:org.apache.flume.channel.file.CheckpointRebuilder.java

public static void main(String[] args) throws Exception {
    Options options = new Options();
    Option opt = new Option("c", true, "checkpoint directory");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option("l", true, "comma-separated list of log directories");
    opt.setRequired(true);
    options.addOption(opt);
    opt = new Option("t", true, "capacity of the channel");
    opt.setRequired(true);
    options.addOption(opt);
    CommandLineParser parser = new GnuParser();
    CommandLine cli = parser.parse(options, args);
    File checkpointDir = new File(cli.getOptionValue("c"));
    String[] logDirs = cli.getOptionValue("l").split(",");
    List<File> logFiles = Lists.newArrayList();
    for (String logDir : logDirs) {
        logFiles.addAll(LogUtils.getLogs(new File(logDir)));
    }
    int capacity = Integer.parseInt(cli.getOptionValue("t"));
    File checkpointFile = new File(checkpointDir, "checkpoint");
    if (checkpointFile.exists()) {
        LOG.error("Cannot execute fast replay",
                new IllegalStateException("Checkpoint exists" + checkpointFile));
    } else {
        EventQueueBackingStore backingStore = EventQueueBackingStoreFactory.get(checkpointFile, capacity,
                "channel");
        FlumeEventQueue queue = new FlumeEventQueue(backingStore, new File(checkpointDir, "inflighttakes"),
                new File(checkpointDir, "inflightputs"));
        CheckpointRebuilder rebuilder = new CheckpointRebuilder(logFiles, queue);
        if (rebuilder.rebuild()) {
            rebuilder.writeCheckpoint();
        } else {
            LOG.error("Could not rebuild the checkpoint due to errors.");
        }
    }
}

From source file:org.apache.flume.tools.PasswordObfuscator.java

/**
 *
 * @param args  needs --outfile
 */
public static void main(String[] args) throws IOException, ParseException {
    Options options = new Options();

    Option option = new Option(null, "outfile", true, "the file in which to store the password");
    option.setRequired(true);
    options.addOption(option);

    CommandLineParser parser = new GnuParser();
    CommandLine commandLine = parser.parse(options, args);

    String outputFile = commandLine.getOptionValue("outfile");

    System.out.println("Enter the password : ");
    String password = new String(System.console().readPassword());
    System.out.println("Verify password    : ");
    if (!password.equals(new String(System.console().readPassword()))) {
        System.err.println("Passwords do not match. Please try again");
        return;
    }

    try {
        encodeToFile(password, outputFile);
        System.out.println();
        System.out.println("Password has been stored in file : " + outputFile);
    } catch (IOException e) {
        System.err.println("Unable to write to output file : " + outputFile);
    }

}

From source file:org.apache.hadoop.hbase.migration.UpgradeTo96.java

private void setOptions() {
    options.addOption("h", "help", false, "Help");
    options.addOption(new Option("check", false,
            "Run upgrade check; looks for HFileV1 " + " under ${hbase.rootdir} or provided 'dir' directory."));
    options.addOption(new Option("execute", false, "Run upgrade; zk and hdfs must be up, hbase down"));
    Option pathOption = new Option("dir", true, "Relative path of dir to check for HFileV1s.");
    pathOption.setRequired(false);
    options.addOption(pathOption);
}