Example usage for org.apache.commons.daemon DaemonContext getArguments

Introduction

On this page you can find example usage for org.apache.commons.daemon DaemonContext getArguments.

Prototype

public String[] getArguments();

Document

Returns an array of String arguments supplied by the environment.
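
For orientation, here is a minimal sketch of a Daemon implementation that stores the arguments returned by getArguments() during init and uses them in start(). The class and member names (EchoDaemon, args) are illustrative only and are not taken from the projects listed under Usage.

import org.apache.commons.daemon.Daemon;
import org.apache.commons.daemon.DaemonContext;
import org.apache.commons.daemon.DaemonInitException;

// Illustrative sketch; not taken from any of the projects below.
public class EchoDaemon implements Daemon {

    private String[] args;

    @Override
    public void init(DaemonContext context) throws DaemonInitException, Exception {
        // Arguments supplied by the environment, e.g. trailing arguments on the jsvc command line.
        args = context.getArguments();
        if (args == null || args.length == 0) {
            throw new DaemonInitException("at least one argument is required");
        }
    }

    @Override
    public void start() throws Exception {
        System.out.println("Started with arguments: " + String.join(" ", args));
    }

    @Override
    public void stop() throws Exception {
        // Release any resources acquired in start().
    }

    @Override
    public void destroy() {
        // Final cleanup before the JVM exits.
    }
}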

Usage

From source file:com.twentyn.reachables.order.Service.java

@Override
public void init(DaemonContext context) throws DaemonInitException, Exception {
    String[] args = context.getArguments();
    LOGGER.info("Daemon initializing with arguments: %s", StringUtils.join(args, " "));
    init(args);
}

From source file:com.buaa.cfs.nfs3.PrivilegedNfsGatewayStarter.java

@Override
public void init(DaemonContext context) throws Exception {
    System.err.println("Initializing privileged NFS client socket...");
    NfsConfiguration conf = new NfsConfiguration();
    int clientPort = conf.getInt(NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY,
            NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_DEFAULT);
    if (clientPort < 1 || clientPort > 1023) {
        throw new RuntimeException("Must start privileged NFS server with '"
                + NfsConfigKeys.DFS_NFS_REGISTRATION_PORT_KEY + "' configured to a " + "privileged port.");
    }
    registrationSocket = new DatagramSocket(null);
    // Configure address reuse before binding; setReuseAddress has no effect on an already-bound socket.
    registrationSocket.setReuseAddress(true);
    registrationSocket.bind(new InetSocketAddress("localhost", clientPort));
    args = context.getArguments();
}

From source file:org.apache.activemq.console.ActiveMQLauncher.java

public void init(DaemonContext arg0) throws Exception {
    // we need to save the args we started with.
    args = Arrays.asList(arg0.getArguments());
}

From source file:org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.java

@Override
public void init(DaemonContext context) throws Exception {
    System.err.println("Initializing secure datanode resources");
    // We should only start up a secure datanode in a Kerberos-secured cluster
    Configuration conf = new Configuration(); // Skip UGI method to not log in
    if (!conf.get(HADOOP_SECURITY_AUTHENTICATION).equals("kerberos"))
        throw new RuntimeException("Cannot start secure datanode in unsecure cluster");

    // Stash command-line arguments for regular datanode
    args = context.getArguments();

    // Obtain secure port for data streaming to datanode
    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
    int socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);

    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    ss.bind(socAddr, 0);

    // Check that we got the port we need
    if (ss.getLocalPort() != socAddr.getPort())
        throw new RuntimeException("Unable to bind on specified streaming port in secure " + "context. Needed "
                + socAddr.getPort() + ", got " + ss.getLocalPort());

    // Obtain secure listener for web server
    SelectChannelConnector listener = (SelectChannelConnector) HttpServer.createDefaultChannelConnector();
    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
    listener.setHost(infoSocAddr.getHostName());
    listener.setPort(infoSocAddr.getPort());
    // Open listener here in order to bind to port as root
    listener.open();
    if (listener.getPort() != infoSocAddr.getPort())
        throw new RuntimeException("Unable to bind on specified info port in secure " + "context. Needed "
                + socAddr.getPort() + ", got " + ss.getLocalPort());

    if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023)
        throw new RuntimeException(
                "Cannot start secure datanode on non-privileged " + " ports. (streaming port = " + ss
                        + " ) (http listener port = " + listener.getConnection() + "). Exiting.");

    System.err.println("Successfully obtained privileged resources (streaming port = " + ss
            + " ) (http listener port = " + listener.getConnection() + ")");

    resources = new SecureResources(ss, listener);
}

From source file:org.apache.rya.streams.querymanager.QueryManagerDaemon.java

@Override
public void init(final DaemonContext context) throws DaemonInitException, Exception {
    requireNonNull(context);

    // Parse the command line arguments for the configuration file to use.
    final String[] args = context.getArguments();
    final DaemonParameters params = new DaemonParameters();
    try {
        new JCommander(params).parse(args);
    } catch (final ParameterException e) {
        throw new DaemonInitException("Unable to parse the command line arguments.", e);
    }
    final Path configFile = params.config != null ? Paths.get(params.config) : DEFAULT_CONFIGURATION_PATH;
    log.info("Loading the following configuration file: " + configFile);

    // Unmarshall the configuration file into an object.
    final QueryManagerConfig config;
    try (final InputStream stream = Files.newInputStream(configFile)) {
        config = QueryManagerConfigUnmarshaller.unmarshall(stream);
    } catch (final JAXBException | SAXException e) {
        throw new DaemonInitException("Unable to marshall the configuration XML file: " + configFile, e);
    }

    // Read the source polling period from the configuration.
    final QueryChanngeLogDiscoveryPeriod periodConfig = config.getPerformanceTunning()
            .getQueryChanngeLogDiscoveryPeriod();
    final long period = periodConfig.getValue().longValue();
    final TimeUnit units = TimeUnit.valueOf(periodConfig.getUnits().toString());
    log.info("Query Change Log Polling Period: " + period + " " + units);
    final Scheduler scheduler = Scheduler.newFixedRateSchedule(0, period, units);

    // Initialize a QueryChangeLogSource.
    final Kafka kafka = config.getQueryChangeLogSource().getKafka();
    log.info("Kafka Source: " + kafka.getHostname() + ":" + kafka.getPort());
    final QueryChangeLogSource source = new KafkaQueryChangeLogSource(kafka.getHostname(), kafka.getPort(),
            scheduler);

    // Initialize a QueryExecutor.
    final String zookeeperServers = config.getQueryExecutor().getLocalKafkaStreams().getZookeepers();
    final KafkaStreamsFactory streamsFactory = new SingleThreadKafkaStreamsFactory(
            kafka.getHostname() + ":" + kafka.getPort());
    final QueryExecutor queryExecutor = new LocalQueryExecutor(new CreateKafkaTopic(zookeeperServers),
            streamsFactory);

    // Initialize the QueryManager using the configured resources.
    manager = new QueryManager(queryExecutor, source, period, units);
}

From source file:org.codekaizen.daemon.play.PlayDaemon.java

@Override
public void init(final DaemonContext context) throws DaemonInitException {
    if (context.getArguments() != null) {
        args = context.getArguments();
    }
    if (args.length > 0) {
        applicationHome = new File(args[0]);
    }
    if (args.length > 1) {
        final String port = args[1];
        if (DISABLED.equals(port)) {
            httpPort = Option.empty();
        } else {
            try {
                httpPort = Option.apply((Object) Integer.valueOf(port));
            } catch (final NumberFormatException cause) {
                throw new DaemonInitException("specified port [" + port + "] is invalid");
            }
        }
    }
    if (args.length > 2) {
        final String port = args[2];
        if (DISABLED.equals(port)) {
            httpsPort = Option.empty();
        } else {
            try {
                httpsPort = Option.apply((Object) Integer.valueOf(port));
            } catch (final NumberFormatException cause) {
                throw new DaemonInitException("specified port [" + port + "] is invalid");
            }
        }
    }
    if (args.length > 3) {
        address = args[3];
    }
    if (!applicationHome.isDirectory()) {
        throw new DaemonInitException(
                "specified application home [" + applicationHome + "] is not a directory");
    }
}

From source file:org.codekaizen.daemon.play.PlayDaemonTest.java

@Test
public void shouldInitializeWithCommandLineArguments() throws DaemonInitException {
    final String[] args = new String[4];
    args[0] = System.getProperty("java.io.tmpdir");
    args[1] = "disabled";
    args[2] = "9092";
    args[3] = "0.0.0.0";
    final DaemonContext context = mock(DaemonContext.class);
    when(context.getArguments()).thenReturn(args);
    final PlayDaemon daemon = new PlayDaemon();
    daemon.init(context);
    assertTrue(daemon.httpPort.isEmpty());
    assertFalse(daemon.httpsPort.isEmpty());
    assertEquals(args[3], daemon.address);
}

From source file:org.eclipse.scada.da.utils.daemon.ExporterDaemon.java

@Override
public void init(final DaemonContext ctx) throws Exception {
    logger.info("Initializing ExporterDaemon");

    if (ctx.getArguments().length < 1) {
        logger.warn("No arguments passed. No config file available");
        throw new Exception("Invalid arguments: exporter <configfile>");
    }

    final String fileName = ctx.getArguments()[0];
    logger.info("Loading configuration from: {}", fileName);
    final File configFile = new File(fileName);
    if (!configFile.canRead()) {
        throw new Exception(String.format("'%s' is not a file or can not be read", fileName));
    }

    this.controller = new Controller(configFile);
}

From source file:org.gnieh.blue.launcher.Main.java

/**
 * <p>
 * This method performs the main task of constructing a framework instance
 * and starting its execution. The following functions are performed
 * when invoked:
 * </p>
 * <ol>
 *   <li><i><b>Examine and verify command-line arguments.</b></i> The launcher
 *       accepts a "<tt>-b</tt>" command line switch to set the bundle auto-deploy
 *       directory and a single argument to set the bundle cache directory.
 *   </li>
 *   <li><i><b>Read the system properties file.</b></i> This is a file
 *       containing properties to be pushed into <tt>System.setProperty()</tt>
 *       before starting the framework. This mechanism is mainly shorthand
 *       for people starting the framework from the command line to avoid having
 *       to specify a bunch of <tt>-D</tt> system property definitions.
 *       The only properties defined in this file that will impact the framework's
 *       behavior are those concerning setting HTTP proxies, such as
 *       <tt>http.proxyHost</tt>, <tt>http.proxyPort</tt>, and
 *       <tt>http.proxyAuth</tt>. Generally speaking, the framework does
 *       not use system properties at all.
 *   </li>
 *   <li><i><b>Read the framework's configuration property file.</b></i> This is
 *       a file containing properties used to configure the framework
 *       instance and to pass configuration information into
 *       bundles installed into the framework instance. The configuration
 *       property file is called <tt>config.properties</tt> by default
 *       and is located in the <tt>conf/</tt> directory of the Felix
 *       installation directory, which is the parent directory of the
 *       directory containing the <tt>felix.jar</tt> file. It is possible
 *       to use a different location for the property file by specifying
 *       the desired URL using the <tt>felix.config.properties</tt>
 *       system property; this should be set using the <tt>-D</tt> syntax
 *       when executing the JVM. If the <tt>config.properties</tt> file
 *       cannot be found, then default values are used for all configuration
 *       properties. Refer to the
 *       <a href="Felix.html#Felix(java.util.Map)"><tt>Felix</tt></a>
 *       constructor documentation for more information on framework
 *       configuration properties.
 *   </li>
 *   <li><i><b>Copy configuration properties specified as system properties
 *       into the set of configuration properties.</b></i> Even though the
 *       Felix framework does not consult system properties for configuration
 *       information, sometimes it is convenient to specify them on the command
 *       line when launching Felix. To make this possible, the Felix launcher
 *       copies any configuration properties specified as system properties
 *       into the set of configuration properties passed into Felix.
 *   </li>
 *   <li><i><b>Add shutdown hook.</b></i> To make sure the framework shuts down
 *       cleanly, the launcher installs a shutdown hook; this can be disabled
 *       with the <tt>felix.shutdown.hook</tt> configuration property.
 *   </li>
 *   <li><i><b>Create and initialize a framework instance.</b></i> The OSGi standard
 *       <tt>FrameworkFactory</tt> is retrieved from <tt>META-INF/services</tt>
 *       and used to create a framework instance with the configuration properties.
 *   </li>
 *   <li><i><b>Auto-deploy bundles.</b></i> All bundles in the auto-deploy
 *       directory are deployed into the framework instance.
 *   </li>
 *   <li><i><b>Start the framework.</b></i> The framework is started and
 *       the launcher thread waits for the framework to shutdown.
 *   </li>
 * </ol>
 * <p>
 * It should be noted that simply starting an instance of the framework is not
 * enough to create an interactive session with it. It is necessary to install
 * and start bundles that provide some means to interact with the framework;
 * this is generally done by bundles in the auto-deploy directory or specifying
 * an "auto-start" property in the configuration property file. If no bundles
 * providing a means to interact with the framework are installed or if the
 * configuration property file cannot be found, the framework will appear to
 * be hung or deadlocked. This is not the case; it is executing correctly,
 * there is just no way to interact with it.
 * </p>
 * <p>
 * The launcher provides two ways to deploy bundles into a framework at
 * startup, which have associated configuration properties:
 * </p>
 * <ul>
 *   <li>Bundle auto-deploy - Automatically deploys all bundles from a
 *       specified directory, controlled by the following configuration
 *       properties:
 *     <ul>
 *       <li><tt>felix.auto.deploy.dir</tt> - Specifies the auto-deploy directory
 *           from which bundles are automatically deployed at framework startup.
 *           The default is the <tt>bundle/</tt> directory of the current directory.
 *       </li>
 *       <li><tt>felix.auto.deploy.action</tt> - Specifies the auto-deploy actions
 *           to be performed on bundle JAR files found in the auto-deploy directory.
 *           The possible actions are <tt>install</tt>, <tt>update</tt>,
 *           <tt>start</tt>, and <tt>uninstall</tt>. If no actions are specified,
 *           then the auto-deploy directory is not processed. There is no default
 *           value for this property.
 *       </li>
 *     </ul>
 *   </li>
 *   <li>Bundle auto-properties - Configuration properties which specify URLs
 *       to bundles to install/start:
 *     <ul>
 *       <li><tt>felix.auto.install.N</tt> - Space-delimited list of bundle
 *           URLs to automatically install when the framework is started,
 *           where <tt>N</tt> is the start level into which the bundle will be
 *           installed (e.g., felix.auto.install.2).
 *       </li>
 *       <li><tt>felix.auto.start.N</tt> - Space-delimited list of bundle URLs
 *           to automatically install and start when the framework is started,
 *           where <tt>N</tt> is the start level into which the bundle will be
 *           installed (e.g., felix.auto.start.2).
 *       </li>
 *     </ul>
 *   </li>
 * </ul>
 * <p>
 * These properties should be specified in the <tt>config.properties</tt>
 * so that they can be processed by the launcher during the framework
 * startup process.
 * </p>
 * @param args Accepts arguments to set the auto-deploy directory and/or
 *        the bundle cache directory.
 * @throws Exception If an error occurs.
**/
@Override
public void init(DaemonContext dc) throws Exception {

    System.out.println("Starting OSGi framework");

    String[] args = dc.getArguments();
    // Look for bundle directory and/or cache directory.
    // We support at most one argument, which is the bundle
    // cache directory.
    String bundleDir = null;
    String cacheDir = null;
    boolean expectBundleDir = false;
    for (int i = 0; i < args.length; i++) {
        if (args[i].equals(BUNDLE_DIR_SWITCH)) {
            expectBundleDir = true;
        } else if (expectBundleDir) {
            bundleDir = args[i];
            expectBundleDir = false;
        } else {
            cacheDir = args[i];
        }
    }

    if ((args.length > 3) || (expectBundleDir && bundleDir == null)) {
        System.out.println("Usage: [-b <bundle-deploy-dir>] [<bundle-cache-dir>]");
        System.exit(0);
    }

    // Load system properties.
    Main.loadSystemProperties();

    // Read configuration properties.
    Map<String, String> configProps = Main.loadConfigProperties();
    // If no configuration properties were found, then create
    // an empty properties object.
    if (configProps == null) {
        System.err.println("No " + CONFIG_PROPERTIES_FILE_VALUE + " found.");
        configProps = new HashMap<String, String>();
    }

    // Copy framework properties from the system properties.
    Main.copySystemProperties(configProps);

    // If there is a passed in bundle auto-deploy directory, then
    // that overwrites anything in the config file.
    if (bundleDir != null) {
        configProps.put(AutoProcessor.AUTO_DEPLOY_DIR_PROPERY, bundleDir);
    }

    // If there is a passed in bundle cache directory, then
    // that overwrites anything in the config file.
    if (cacheDir != null) {
        configProps.put(Constants.FRAMEWORK_STORAGE, cacheDir);
    }

    // If enabled, register a shutdown hook to make sure the framework is
    // cleanly shutdown when the VM exits.
    String enableHook = configProps.get(SHUTDOWN_HOOK_PROP);
    if ((enableHook == null) || !enableHook.equalsIgnoreCase("false")) {
        Runtime.getRuntime().addShutdownHook(new Thread("Felix Shutdown Hook") {
            public void run() {
                try {
                    if (m_fwk != null) {
                        m_fwk.stop();
                        m_fwk.waitForStop(0);
                    }
                } catch (Exception ex) {
                    System.err.println("Error stopping framework: " + ex);
                }
            }
        });
    }

    // Create an instance of the framework.
    FrameworkFactory factory = getFrameworkFactory();
    m_fwk = factory.newFramework(configProps);
    // Initialize the framework, but don't start it yet.
    m_fwk.init();
    // Use the system bundle context to process the auto-deploy
    // and auto-install/auto-start properties.
    AutoProcessor.process(configProps, m_fwk.getBundleContext());

}

From source file:org.iobserve.analysis.service.AnalysisDaemon.java

@Override
public void init(final DaemonContext context) throws DaemonInitException, MalformedURLException {
    final String[] args = context.getArguments();
    final CommandLineParser parser = new DefaultParser();
    try {
        CommandLine commandLine = parser.parse(AnalysisDaemon.createHelpOptions(), args);

        if (commandLine.hasOption("h")) {
            final HelpFormatter formatter = new HelpFormatter();
            formatter.printHelp("iobserve-service", AnalysisDaemon.createOptions());
        } else {
            commandLine = parser.parse(AnalysisDaemon.createOptions(), args);

            /** get configuration parameter. */
            final int listenPort = Integer.parseInt(commandLine.getOptionValue("i"));
            final String outputHostname = commandLine.getOptionValues("o")[0];
            final String outputPort = commandLine.getOptionValues("o")[1];

            final File pcmModelsDirectory = new File(commandLine.getOptionValue("p"));

            final int varianceOfUserGroups = Integer
                    .parseInt(commandLine.getOptionValue(AnalysisDaemon.VARIANCE_OF_USER_GROUPS));
            final int thinkTime = Integer.parseInt(commandLine.getOptionValue(AnalysisDaemon.THINK_TIME));
            final boolean closedWorkload = commandLine.hasOption(AnalysisDaemon.CLOSED_WORKLOAD);

            final String systemId = commandLine.getOptionValue("s");

            /** process parameter. */
            if (pcmModelsDirectory.exists()) {
                if (pcmModelsDirectory.isDirectory()) {
                    final InitializeModelProviders modelProvider = new InitializeModelProviders(
                            pcmModelsDirectory);

                    final ICorrespondence correspondenceModel = modelProvider.getCorrespondenceModel();
                    final UsageModelProvider usageModelProvider = modelProvider.getUsageModelProvider();
                    final RepositoryModelProvider repositoryModelProvider = modelProvider
                            .getRepositoryModelProvider();
                    final ResourceEnvironmentModelProvider resourceEnvironmentModelProvider = modelProvider
                            .getResourceEnvironmentModelProvider();
                    final AllocationModelProvider allocationModelProvider = modelProvider
                            .getAllocationModelProvider();
                    final SystemModelProvider systemModelProvider = modelProvider.getSystemModelProvider();

                    final Configuration configuration = new ServiceConfiguration(listenPort, outputHostname,
                            outputPort, systemId, varianceOfUserGroups, thinkTime, closedWorkload,
                            correspondenceModel, usageModelProvider, repositoryModelProvider,
                            resourceEnvironmentModelProvider, allocationModelProvider, systemModelProvider);

                    this.thread = new AnalysisThread(this, configuration);
                } else {
                    throw new DaemonInitException("CLI error: PCM directory " + pcmModelsDirectory.getPath()
                            + " is not a directory.");
                }
            } else {
                throw new DaemonInitException(
                        "CLI error: PCM directory " + pcmModelsDirectory.getPath() + " does not exist.");
            }
        }
    } catch (final ParseException exp) {
        final HelpFormatter formatter = new HelpFormatter();
        formatter.printHelp("iobserve-analysis", AnalysisDaemon.createOptions());
        throw new DaemonInitException("CLI error: " + exp.getMessage());
    }
}