Example usage for org.apache.commons.exec ExecuteWatchdog destroyProcess

Introduction

On this page you can find usage examples for org.apache.commons.exec ExecuteWatchdog#destroyProcess().

Prototype

public synchronized void destroyProcess() 

Document

Destroys the running process manually.
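
Before the project examples below, here is a minimal self-contained sketch of the typical pattern: start a command asynchronously with a watchdog attached, then call destroyProcess() to terminate the child manually. The sleep command and the timing values are illustrative assumptions, not taken from any of the sources below.

import org.apache.commons.exec.CommandLine;
import org.apache.commons.exec.DefaultExecuteResultHandler;
import org.apache.commons.exec.DefaultExecutor;
import org.apache.commons.exec.ExecuteWatchdog;

public class DestroyProcessSketch {
    public static void main(String[] args) throws Exception {
        // Illustrative long-running command; replace with whatever you actually run.
        CommandLine cmdLine = new CommandLine("sleep");
        cmdLine.addArgument("60");

        // INFINITE_TIMEOUT means the watchdog never kills the process on its own;
        // it only acts when destroyProcess() is called.
        ExecuteWatchdog watchdog = new ExecuteWatchdog(ExecuteWatchdog.INFINITE_TIMEOUT);
        DefaultExecutor executor = new DefaultExecutor();
        executor.setWatchdog(watchdog);

        // Asynchronous execution: the result handler is notified when the process ends.
        DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
        executor.execute(cmdLine, resultHandler);

        // Later (for example when a job is cancelled), terminate the child manually.
        Thread.sleep(1000);
        watchdog.destroyProcess();

        // waitFor() returns once the process has died; killedProcess() reports whether
        // the watchdog (rather than a normal exit) terminated it.
        resultHandler.waitFor();
        System.out.println("Killed by watchdog: " + watchdog.killedProcess());
    }
}

The project examples that follow are variations of this pattern: the watchdog is kept somewhere reachable (a handler map, an executor registry, a final local captured by a callback), and destroyProcess() is called from a cancel, tear-down, or close path.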

Usage

From source file:com.netflix.spinnaker.halyard.deploy.job.v1.JobExecutorLocal.java

@Override
public void cancelJob(String jobId) {
    log.info("Canceling job " + jobId + "...");

    // Remove the job from this executor's handler map.
    ExecutionHandler canceledJobHandler = jobIdToHandlerMap.remove(jobId);
    if (canceledJobHandler == null) {
        return;
    }

    ExecuteWatchdog watchdog = canceledJobHandler.getWatchdog();
    if (watchdog == null) {
        return;
    }

    watchdog.destroyProcess();
}

From source file:com.netflix.spinnaker.halyard.core.job.v1.JobExecutorLocal.java

@Override
public void cancelJob(String jobId) {
    log.info("Canceling job " + jobId + "...");

    if (pendingJobSet.contains(jobId)) {
        pendingJobSet.remove(jobId);
    }

    // Remove the job from this executor's handler map.
    ExecutionHandler canceledJobHandler = jobIdToHandlerMap.remove(jobId);
    if (canceledJobHandler == null) {
        return;
    }

    ExecuteWatchdog watchdog = canceledJobHandler.getWatchdog();
    if (watchdog == null) {
        return;
    }

    watchdog.destroyProcess();
}

From source file:de.jsurf.http.HttpServerHandler.java

@Override
public void messageReceived(ChannelHandlerContext ctx, MessageEvent e) throws Exception {

    HttpRequest request = (HttpRequest) e.getMessage();
    HttpResponse response = new DefaultHttpResponse(HTTP_1_1, OK);
    response.setContent(ChannelBuffers.copiedBuffer(buf.toString(), CharsetUtil.UTF_8));
    response.setHeader(CONTENT_TYPE, "video/mpts");
    /*response.setChunked(true);
    response.setHeader(Names.TRANSFER_ENCODING, Values.CHUNKED);*/

    Channel c = e.getChannel();

    // create a media reader
    String inputStream = HttpServerConfiguration.getConfiguration().getChannelInput(request.getUri());

    if (inputStream == null) {
        response = new DefaultHttpResponse(HTTP_1_1, NOT_FOUND);
        ChannelFuture future = c.write(response);
        future.addListener(ChannelFutureListener.CLOSE);
        return;
    }

    String path = new java.io.File(".").getCanonicalPath();
    log.debug("Current execution path: " + path);

    String[] parameters = new String[] { "-loglevel", "error", "-i", inputStream, "-vcodec", "copy", "-acodec",
            "copy", "-vbsf", "h264_mp4toannexb", "-f", "mpegts", "pipe:1" };

    CommandLine cmdLine = CommandLine.parse("ffmpeg.exe");
    cmdLine.addArguments(parameters);
    DefaultExecutor executor = new DefaultExecutor();
    final ExecuteWatchdog watchDog = new ExecuteWatchdog(86400000); // One day timeout          
    executor.setWatchdog(watchDog);

    PipedInputStream pin = new PipedInputStream();
    PipedOutputStream pout = new PipedOutputStream(pin);

    PumpStreamHandler streamHandler = new PumpStreamHandler(pout, System.err);
    executor.setStreamHandler(streamHandler);

    DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
    executor.execute(cmdLine, resultHandler);

    c.write(response);
    InputStream in = new BufferedInputStream(pin);
    ChannelFuture future = c.write(new ChunkedStream(in));

    future.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            try {
                log.debug("operationComplete: closeChannel");
                future.getChannel().close();
            } catch (Exception e) {

            }
            log.debug("operationComplete: Destroy ffmpeg process");
            watchDog.destroyProcess();
        }
    });
}

From source file:com.netflix.spinnaker.halyard.core.job.v1.JobExecutorLocal.java

@Override
public String startJob(JobRequest jobRequest, Map<String, String> env, InputStream stdIn,
        ByteArrayOutputStream stdOut, ByteArrayOutputStream stdErr) {
    List<String> tokenizedCommand = jobRequest.getTokenizedCommand();
    if (tokenizedCommand == null || tokenizedCommand.isEmpty()) {
        throw new IllegalArgumentException("JobRequest must include a tokenized command to run");
    }

    final long timeoutMillis = jobRequest.getTimeoutMillis() == null ? ExecuteWatchdog.INFINITE_TIMEOUT
            : jobRequest.getTimeoutMillis();

    String jobId = UUID.randomUUID().toString();

    pendingJobSet.add(jobId);

    log.info("Scheduling job " + jobRequest.getTokenizedCommand() + " with id " + jobId);

    scheduler.createWorker().schedule(new Action0() {
        @Override
        public void call() {
            PumpStreamHandler pumpStreamHandler = new PumpStreamHandler(stdOut, stdErr, stdIn);
            CommandLine commandLine;

            log.info("Executing " + jobId + "with tokenized command: " + tokenizedCommand);

            // Grab the first element as the command.
            commandLine = new CommandLine(jobRequest.getTokenizedCommand().get(0));

            // Treat the rest as arguments.
            String[] arguments = Arrays.copyOfRange(tokenizedCommand.toArray(new String[0]), 1,
                    tokenizedCommand.size());

            commandLine.addArguments(arguments, false);

            DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
            ExecuteWatchdog watchdog = new ExecuteWatchdog(timeoutMillis) {
                @Override
                public void timeoutOccured(Watchdog w) {
                    // If a watchdog is passed in, this was an actual time-out. Otherwise, it is likely
                    // the result of calling watchdog.destroyProcess().
                    if (w != null) {
                        log.warn("Job " + jobId + " timed-out after " + timeoutMillis + "ms.");

                        cancelJob(jobId);
                    }

                    super.timeoutOccured(w);
                }
            };

            Executor executor = new DefaultExecutor();
            executor.setStreamHandler(pumpStreamHandler);
            executor.setWatchdog(watchdog);
            try {
                executor.execute(commandLine, env, resultHandler);
            } catch (IOException e) {
                throw new RuntimeException("Execution of " + jobId + " failed ", e);
            }

            // Give the job some time to spin up.
            try {
                Thread.sleep(500);
            } catch (InterruptedException ignored) {
            }

            jobIdToHandlerMap.put(jobId, new ExecutionHandler().setResultHandler(resultHandler)
                    .setWatchdog(watchdog).setStdOut(stdOut).setStdErr(stdErr));

            if (pendingJobSet.contains(jobId)) {
                pendingJobSet.remove(jobId);
            } else {
                // If the job was removed from the set of pending jobs by someone else, its deletion was requested
                jobIdToHandlerMap.remove(jobId);
                watchdog.destroyProcess();
            }
        }
    });

    return jobId;
}

From source file:org.apache.stratos.cartridge.agent.test.JavaCartridgeAgentTest.java

@After
public void tearDown() {
    for (Map.Entry<String, Executor> entry : executorList.entrySet()) {
        try {
            String commandText = entry.getKey();
            Executor executor = entry.getValue();
            ExecuteWatchdog watchdog = executor.getWatchdog();
            if (watchdog != null) {
                log.info("Terminating process: " + commandText);
                watchdog.destroyProcess();
            }
            //                File workingDirectory = executor.getWorkingDirectory();
            //                if (workingDirectory != null) {
            //                    log.info("Cleaning working directory: " + workingDirectory.getAbsolutePath());
            //                    FileUtils.deleteDirectory(workingDirectory);
            //                }
        } catch (Exception ignore) {
        }
    }
    for (ServerSocket serverSocket : serverSocketList) {
        try {
            log.info("Stopping socket server: " + serverSocket.getLocalSocketAddress());
            serverSocket.close();
        } catch (IOException e) {
            log.info("Couldn't stop socket server " + serverSocket.getLocalSocketAddress() + ", "
                    + e.getMessage());
        }
    }

    try {
        log.info("Deleting source checkout folder...");
        FileUtils.deleteDirectory(new File("/tmp/test-jca-source"));
    } catch (Exception ignore) {
    }

    //this.instanceStatusEventReceiver.terminate();
    //  this.topologyEventReceiver.terminate();

    this.instanceActivated = false;
    this.instanceStarted = false;
}

From source file:org.apache.stratos.python.cartridge.agent.test.PythonCartridgeAgentTest.java

/**
 * TearDown method for test method testPythonCartridgeAgent
 */
@After
public void tearDown() {
    for (Map.Entry<String, Executor> entry : executorList.entrySet()) {
        try {
            String commandText = entry.getKey();
            Executor executor = entry.getValue();
            ExecuteWatchdog watchdog = executor.getWatchdog();
            if (watchdog != null) {
                log.info("Terminating process: " + commandText);
                watchdog.destroyProcess();
            }
            File workingDirectory = executor.getWorkingDirectory();
            if (workingDirectory != null) {
                log.info("Cleaning working directory: " + workingDirectory.getAbsolutePath());
                FileUtils.deleteDirectory(workingDirectory);
            }
        } catch (Exception ignore) {
        }
    }
    for (ServerSocket serverSocket : serverSocketList) {
        try {
            log.info("Stopping socket server: " + serverSocket.getLocalSocketAddress());
            serverSocket.close();
        } catch (IOException ignore) {
        }
    }

    try {
        log.info("Deleting source checkout folder...");
        FileUtils.deleteDirectory(new File(SOURCE_PATH));
    } catch (Exception ignore) {

    }

    this.instanceStatusEventReceiver.terminate();
    this.topologyEventReceiver.terminate();

    this.instanceActivated = false;
    this.instanceStarted = false;
}

From source file:org.apache.zeppelin.submarine.job.thread.JobRunThread.java

public void run() {
    boolean tryLock = lockRunning.tryLock();
    if (false == tryLock) {
        LOGGER.warn("Can not get JobRunThread lockRunning!");
        return;
    }

    SubmarineUI submarineUI = submarineJob.getSubmarineUI();
    try {
        InterpreterContext intpContext = submarineJob.getIntpContext();
        String noteId = intpContext.getNoteId();
        String userName = intpContext.getAuthenticationInfo().getUser();
        String jobName = SubmarineUtils.getJobName(userName, noteId);

        if (true == running.get()) {
            String message = String.format("Job %s already running.", jobName);
            submarineUI.outputLog("WARN", message);
            LOGGER.warn(message);
            return;
        }
        running.set(true);

        Properties properties = submarineJob.getProperties();
        HdfsClient hdfsClient = submarineJob.getHdfsClient();
        File pythonWorkDir = submarineJob.getPythonWorkDir();

        submarineJob.setCurrentJobState(EXECUTE_SUBMARINE);

        String algorithmPath = properties.getProperty(SubmarineConstants.SUBMARINE_ALGORITHM_HDFS_PATH, "");
        if (!algorithmPath.startsWith("hdfs://")) {
            String message = "Algorithm file upload HDFS path, " + "Must be `hdfs://` prefix. now setting "
                    + algorithmPath;
            submarineUI.outputLog("Configuration error", message);
            return;
        }

        List<ParagraphInfo> paragraphInfos = intpContext.getIntpEventClient().getParagraphList(userName,
                noteId);
        String outputMsg = hdfsClient.saveParagraphToFiles(noteId, paragraphInfos,
                pythonWorkDir == null ? "" : pythonWorkDir.getAbsolutePath(), properties);
        if (!StringUtils.isEmpty(outputMsg)) {
            submarineUI.outputLog("Save algorithm file", outputMsg);
        }

        HashMap jinjaParams = SubmarineUtils.propertiesToJinjaParams(properties, submarineJob, true);

        URL urlTemplate = Resources.getResource(SubmarineJob.SUBMARINE_JOBRUN_TF_JINJA);
        String template = Resources.toString(urlTemplate, Charsets.UTF_8);
        Jinjava jinjava = new Jinjava();
        String submarineCmd = jinjava.render(template, jinjaParams);
        // If the first line is a newline, delete the newline
        int firstLineIsNewline = submarineCmd.indexOf("\n");
        if (firstLineIsNewline == 0) {
            submarineCmd = submarineCmd.replaceFirst("\n", "");
        }

        StringBuffer sbLogs = new StringBuffer(submarineCmd);
        submarineUI.outputLog("Submarine submit command", sbLogs.toString());

        long timeout = Long
                .valueOf(properties.getProperty(SubmarineJob.TIMEOUT_PROPERTY, SubmarineJob.defaultTimeout));
        CommandLine cmdLine = CommandLine.parse(SubmarineJob.shell);
        cmdLine.addArgument(submarineCmd, false);
        DefaultExecutor executor = new DefaultExecutor();
        ExecuteWatchdog watchDog = new ExecuteWatchdog(timeout);
        executor.setWatchdog(watchDog);
        StringBuffer sbLogOutput = new StringBuffer();
        executor.setStreamHandler(new PumpStreamHandler(new LogOutputStream() {
            @Override
            protected void processLine(String line, int level) {
                line = line.trim();
                if (!StringUtils.isEmpty(line)) {
                    sbLogOutput.append(line + "\n");
                }
            }
        }));

        if (Boolean.valueOf(properties.getProperty(SubmarineJob.DIRECTORY_USER_HOME))) {
            executor.setWorkingDirectory(new File(System.getProperty("user.home")));
        }

        Map<String, String> env = new HashMap<>();
        String launchMode = (String) jinjaParams.get(SubmarineConstants.INTERPRETER_LAUNCH_MODE);
        if (StringUtils.equals(launchMode, "yarn")) {
            // Set environment variables in the submarine interpreter container run on yarn
            String javaHome, hadoopHome, hadoopConf;
            javaHome = (String) jinjaParams.get(SubmarineConstants.DOCKER_JAVA_HOME);
            hadoopHome = (String) jinjaParams.get(SubmarineConstants.DOCKER_HADOOP_HDFS_HOME);
            hadoopConf = (String) jinjaParams.get(SubmarineConstants.SUBMARINE_HADOOP_CONF_DIR);
            env.put("JAVA_HOME", javaHome);
            env.put("HADOOP_HOME", hadoopHome);
            env.put("HADOOP_HDFS_HOME", hadoopHome);
            env.put("HADOOP_CONF_DIR", hadoopConf);
            env.put("YARN_CONF_DIR", hadoopConf);
            env.put("CLASSPATH", "`$HADOOP_HDFS_HOME/bin/hadoop classpath --glob`");
            env.put("ZEPPELIN_FORCE_STOP", "true");
        }

        LOGGER.info("Execute EVN: {}, Command: {} ", env.toString(), submarineCmd);
        AtomicBoolean cmdLineRunning = new AtomicBoolean(true);
        executor.execute(cmdLine, env, new DefaultExecuteResultHandler() {
            @Override
            public void onProcessComplete(int exitValue) {
                String message = String.format("jobName %s ProcessComplete exit value is : %d", jobName,
                        exitValue);
                LOGGER.info(message);
                submarineUI.outputLog("JOR RUN COMPLETE", message);
                cmdLineRunning.set(false);
                submarineJob.setCurrentJobState(EXECUTE_SUBMARINE_FINISHED);
            }

            @Override
            public void onProcessFailed(ExecuteException e) {
                String message = String.format("jobName %s ProcessFailed exit value is : %d, exception is : %s",
                        jobName, e.getExitValue(), e.getMessage());
                LOGGER.error(message);
                submarineUI.outputLog("JOR RUN FAILED", message);
                cmdLineRunning.set(false);
                submarineJob.setCurrentJobState(EXECUTE_SUBMARINE_ERROR);
            }
        });
        int loopCount = 100;
        while ((loopCount-- > 0) && cmdLineRunning.get() && running.get()) {
            Thread.sleep(1000);
        }
        if (watchDog.isWatching()) {
            watchDog.destroyProcess();
            Thread.sleep(1000);
        }
        if (watchDog.isWatching()) {
            watchDog.killedProcess();
        }

        // Check if it has been submitted to YARN
        Map<String, Object> jobState = submarineJob.getJobStateByYarn(jobName);
        loopCount = 50;
        while ((loopCount-- > 0) && !jobState.containsKey("state") && running.get()) {
            Thread.sleep(3000);
            jobState = submarineJob.getJobStateByYarn(jobName);
        }

        if (!jobState.containsKey("state")) {
            String message = String.format("JOB %s was not submitted to YARN!", jobName);
            LOGGER.error(message);
            submarineUI.outputLog("JOR RUN FAILED", message);
            submarineJob.setCurrentJobState(EXECUTE_SUBMARINE_ERROR);
        }
    } catch (Exception e) {
        LOGGER.error(e.getMessage(), e);
        submarineJob.setCurrentJobState(EXECUTE_SUBMARINE_ERROR);
        submarineUI.outputLog("Exception", e.getMessage());
    } finally {
        running.set(false);
        lockRunning.unlock();
    }
}

From source file:org.apache.zeppelin.submarine.job.thread.TensorboardRunThread.java

public void run() {
    SubmarineUI submarineUI = submarineJob.getSubmarineUI();

    boolean tryLock = lockRunning.tryLock();

    try {
        Properties properties = submarineJob.getProperties();
        String tensorboardName = SubmarineUtils.getTensorboardName(submarineJob.getUserName());
        if (true == running.get()) {
            String message = String.format("tensorboard %s already running.", tensorboardName);
            submarineUI.outputLog("WARN", message);
            LOGGER.warn(message);
            return;
        }
        running.set(true);

        HashMap jinjaParams = SubmarineUtils.propertiesToJinjaParams(properties, submarineJob, false);
        // update jobName -> tensorboardName
        jinjaParams.put(SubmarineConstants.JOB_NAME, tensorboardName);

        URL urlTemplate = Resources.getResource(SubmarineJob.SUBMARINE_TENSORBOARD_JINJA);
        String template = Resources.toString(urlTemplate, Charsets.UTF_8);
        Jinjava jinjava = new Jinjava();
        String submarineCmd = jinjava.render(template, jinjaParams);
        // If the first line is a newline, delete the newline
        int firstLineIsNewline = submarineCmd.indexOf("\n");
        if (firstLineIsNewline == 0) {
            submarineCmd = submarineCmd.replaceFirst("\n", "");
        }
        StringBuffer sbLogs = new StringBuffer(submarineCmd);
        submarineUI.outputLog("Submarine submit command", sbLogs.toString());

        long timeout = Long
                .valueOf(properties.getProperty(SubmarineJob.TIMEOUT_PROPERTY, SubmarineJob.defaultTimeout));
        CommandLine cmdLine = CommandLine.parse(SubmarineJob.shell);
        cmdLine.addArgument(submarineCmd, false);
        DefaultExecutor executor = new DefaultExecutor();
        ExecuteWatchdog watchDog = new ExecuteWatchdog(timeout);
        executor.setWatchdog(watchDog);
        StringBuffer sbLogOutput = new StringBuffer();
        executor.setStreamHandler(new PumpStreamHandler(new LogOutputStream() {
            @Override
            protected void processLine(String line, int level) {
                line = line.trim();
                if (!StringUtils.isEmpty(line)) {
                    sbLogOutput.append(line + "\n");
                }
            }
        }));

        if (Boolean.valueOf(properties.getProperty(SubmarineJob.DIRECTORY_USER_HOME))) {
            executor.setWorkingDirectory(new File(System.getProperty("user.home")));
        }

        Map<String, String> env = new HashMap<>();
        String launchMode = (String) jinjaParams.get(SubmarineConstants.INTERPRETER_LAUNCH_MODE);
        if (StringUtils.equals(launchMode, "yarn")) {
            // Set environment variables in the container
            String javaHome, hadoopHome, hadoopConf;
            javaHome = (String) jinjaParams.get(SubmarineConstants.DOCKER_JAVA_HOME);
            hadoopHome = (String) jinjaParams.get(SubmarineConstants.DOCKER_HADOOP_HDFS_HOME);
            hadoopConf = (String) jinjaParams.get(SubmarineConstants.SUBMARINE_HADOOP_CONF_DIR);
            env.put("JAVA_HOME", javaHome);
            env.put("HADOOP_HOME", hadoopHome);
            env.put("HADOOP_HDFS_HOME", hadoopHome);
            env.put("HADOOP_CONF_DIR", hadoopConf);
            env.put("YARN_CONF_DIR", hadoopConf);
            env.put("CLASSPATH", "`$HADOOP_HDFS_HOME/bin/hadoop classpath --glob`");
        }

        LOGGER.info("Execute EVN: {}, Command: {} ", env.toString(), submarineCmd);

        AtomicBoolean cmdLineRunning = new AtomicBoolean(true);
        executor.execute(cmdLine, env, new DefaultExecuteResultHandler() {
            @Override
            public void onProcessComplete(int exitValue) {
                String message = String.format("jobName %s ProcessComplete exit value is : %d", tensorboardName,
                        exitValue);
                LOGGER.info(message);
                submarineUI.outputLog("TENSORBOARD RUN COMPLETE", message);
                cmdLineRunning.set(false);
            }

            @Override
            public void onProcessFailed(ExecuteException e) {
                String message = String.format("jobName %s ProcessFailed exit value is : %d, exception is : %s",
                        tensorboardName, e.getExitValue(), e.getMessage());
                LOGGER.error(message);
                submarineUI.outputLog("TENSORBOARD RUN FAILED", message);
                cmdLineRunning.set(false);
            }
        });
        int loopCount = 100;
        while ((loopCount-- > 0) && cmdLineRunning.get() && running.get()) {
            Thread.sleep(1000);
        }
        if (watchDog.isWatching()) {
            watchDog.destroyProcess();
            Thread.sleep(1000);
        }
        if (watchDog.isWatching()) {
            watchDog.killedProcess();
        }

        // Check if it has been submitted to YARN
        Map<String, Object> jobState = submarineJob.getJobStateByYarn(tensorboardName);
        loopCount = 50;
        while ((loopCount-- > 0) && !jobState.containsKey("state") && running.get()) {
            Thread.sleep(3000);
            jobState = submarineJob.getJobStateByYarn(tensorboardName);
        }

        if (!jobState.containsKey("state")) {
            String message = String.format("tensorboard %s was not submitted to YARN!", tensorboardName);
            LOGGER.error(message);
            submarineUI.outputLog("JOR RUN FAILED", message);
        }
    } catch (Exception e) {
        LOGGER.error(e.getMessage(), e);
        submarineUI.outputLog("Exception", e.getMessage());
    } finally {
        running.set(false);
        lockRunning.unlock();
    }
}

From source file:org.epics.archiverappliance.TomcatSetup.java

public void tearDown() throws Exception {
    for (ExecuteWatchdog watchdog : executorWatchDogs) {
        // We brutally kill the process
        watchdog.destroyProcess();
        try {
            Thread.sleep(10 * 1000);
        } catch (Exception ex) {
        }
        assert (watchdog.killedProcess());
    }

    for (File cleanupFolder : cleanupFolders) {
        logger.debug("Cleaning up folder " + cleanupFolder.getAbsolutePath());
        FileUtils.deleteDirectory(cleanupFolder);
    }

    // So many timeouts; if we are running multiple tests in sequence, the Tomcat socket seems to wind up in a TIME_WAIT state for about 2 minutes.
    // Setting the net.netfilter.nf_conntrack_tcp_timeout_time_wait (using sysctl) seems to hurt other behaviours...
    // Sigh!
    try {
        Thread.sleep(3 * 60 * 1000);
    } catch (Exception ex) {
    }
}

From source file:org.opennms.gizmo.k8s.portforward.KubeCtlPortForwardingStrategy.java

@Override
public ForwardedPort portForward(String namespace, String pod, int remotePort) {
    CommandLine cmdLine = new CommandLine("kubectl");
    cmdLine.addArgument("--namespace=${namespace}");
    cmdLine.addArgument("port-forward");
    cmdLine.addArgument("${pod}");
    cmdLine.addArgument(":${remotePort}");

    HashMap<String, String> map = new HashMap<>();
    map.put("namespace", namespace);
    map.put("pod", pod);
    map.put("remotePort", Integer.toString(remotePort));
    cmdLine.setSubstitutionMap(map);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    ByteArrayOutputStream err = new ByteArrayOutputStream();
    PumpStreamHandler psh = new PumpStreamHandler(out, err);

    DefaultExecutor executor = new DefaultExecutor();
    final ExecuteWatchdog wd = new ExecuteWatchdog(ExecuteWatchdog.INFINITE_TIMEOUT);
    executor.setWatchdog(wd);
    executor.setStreamHandler(psh);

    DefaultExecuteResultHandler resultHandler = new DefaultExecuteResultHandler();
    try {
        executor.execute(cmdLine, resultHandler);
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }

    final int localPort = waitForLocalPort(wd, out, err);
    return new ForwardedPort() {
        @Override
        public InetSocketAddress getAddress() {
            return new InetSocketAddress(InetAddress.getLoopbackAddress(), localPort);
        }

        @Override
        public void close() throws IOException {
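            // Destroying the kubectl child process tears down the forwarded port.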
            wd.destroyProcess();
        }

        @Override
        public String toString() {
            return String.format("ForwardedPort[localPort=%d]", localPort);
        }
    };
}