Example usage for com.google.common.base Throwables propagate

List of usage examples for com.google.common.base Throwables propagate

Introduction

On this page you can find example usage for com.google.common.base Throwables propagate.

Prototype

public static RuntimeException propagate(Throwable throwable) 

Source Link

Document

Propagates throwable as-is if it is an instance of RuntimeException or Error; otherwise, as a last resort, it wraps it in a RuntimeException and then propagates.

Usage

From source file:com.cloudbees.api.Main.java

/**
 * Reads CloudBees API credentials from ~/.bees/bees.config, loads database names from
 * "databases.txt" on the classpath, resolves each database's owner via the CloudBees API,
 * verifies JDBC connectivity to each database, and prints the databases grouped by owner.
 *
 * @param args unused
 * @throws Exception if the credentials file cannot be read or the JDBC driver is missing
 */
public static void main(String[] args) throws Exception {

    File beesCredentialsFile = new File(System.getProperty("user.home"), ".bees/bees.config");
    Preconditions.checkArgument(beesCredentialsFile.exists(), "File %s not found", beesCredentialsFile);
    Properties beesCredentials = new Properties();
    // FIX: the original leaked this FileInputStream; close it via try-with-resources.
    try (FileInputStream credentialsIn = new FileInputStream(beesCredentialsFile)) {
        beesCredentials.load(credentialsIn);
    }
    String apiUrl = "https://api.cloudbees.com/api";
    String apiKey = beesCredentials.getProperty("bees.api.key");
    String secret = beesCredentials.getProperty("bees.api.secret");
    BeesClient client = new BeesClient(apiUrl, apiKey, secret, "xml", "1.0");
    client.setVerbose(false);

    URL databasesUrl = Thread.currentThread().getContextClassLoader().getResource("databases.txt");
    Preconditions.checkNotNull(databasesUrl, "File 'databases.txt' NOT found in the classpath");

    Collection<String> databaseNames;
    try {
        databaseNames = Sets.newTreeSet(Resources.readLines(databasesUrl, Charsets.ISO_8859_1));
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }

    // Extract the database name from lines shaped like:
    // {host_db_create,<<"tco_q5rm">>,<<"TCO_q5rm">>,
    databaseNames = Collections2.transform(databaseNames, new Function<String, String>() {
        @Nullable
        @Override
        public String apply(@Nullable String input) {
            if (input == null)
                return null;

            // Skip comment lines.
            if (input.startsWith("#"))
                return null;

            if (input.indexOf('"') == -1) {
                logger.warn("Skip invalid line {}", input);
                return null;
            }
            input = input.substring(input.indexOf('"') + 1);
            if (input.indexOf('"') == -1) {
                logger.warn("Skip invalid line {}", input);
                return null;
            }
            // Return the text between the first pair of double quotes.
            return input.substring(0, input.indexOf('"'));

        }
    });
    // Drop nulls and empty strings produced by the transform above.
    databaseNames = Collections2.filter(databaseNames, new Predicate<String>() {
        @Override
        public boolean apply(@Nullable String s) {
            return !Strings.isNullOrEmpty(s);
        }
    });

    Multimap<String, String> databasesByAccount = ArrayListMultimap.create();

    Class.forName("com.mysql.jdbc.Driver");

    for (String databaseName : databaseNames) {
        try {
            DatabaseInfo databaseInfo = client.databaseInfo(databaseName, true);
            databasesByAccount.put(databaseInfo.getOwner(), databaseInfo.getName());
            logger.debug("Evaluate " + databaseInfo.getName());

            // NOTE(review): the original contained two dead 'if (true == false)' blocks
            // invoking "database.hibernate" and "database.activate"; they could never
            // execute and have been removed.

            String dbUrl = "jdbc:mysql://" + databaseInfo.getMaster() + "/" + databaseInfo.getName();
            logger.info("Connect to {} user={}", dbUrl, databaseInfo.getUsername());
            // FIX: try-with-resources guarantees the connection is closed even when
            // setAutoCommit throws (the original only closed it on the happy path).
            try (Connection cnn = DriverManager.getConnection(dbUrl, databaseInfo.getUsername(),
                    databaseInfo.getPassword())) {
                cnn.setAutoCommit(false);
            }

        } catch (Exception e) {
            // Best-effort loop: log the failure and continue with the next database.
            logger.warn("Exception for {}", databaseName, e);
        }
    }

    System.out.println("OWNERS");
    for (String account : databasesByAccount.keySet()) {
        System.out.println(account + ": " + Joiner.on(", ").join(databasesByAccount.get(account)));
    }

}

From source file:ca.ualberta.physics.cssdp.vfs.ftp.VfsFtpServer.java

/**
 * Boots an embedded Apache Mina FTP server for the VFS service: configures passive
 * data ports 60200-60250, reads the listener port from the "ftpPort" property,
 * installs the VFS ftplet and user manager, then starts the server.
 *
 * @param args unused
 */
public static void main(String[] args) {

    FtpServerFactory serverFactory = new FtpServerFactory();
    CommandFactoryFactory commandFactoryFactory = new CommandFactoryFactory();
    CommandFactory commandFactory = commandFactoryFactory.createCommandFactory();
    serverFactory.setCommandFactory(commandFactory);

    // override default passive ports
    DataConnectionConfigurationFactory dataConfigurationFactory = new DataConnectionConfigurationFactory();
    dataConfigurationFactory.setPassivePorts("60200-60250");

    ListenerFactory factory = new ListenerFactory();
    factory.setDataConnectionConfiguration(dataConfigurationFactory.createDataConnectionConfiguration());

    // set the port of the listener
    int ftpPort = VfsServer.properties().getInt("ftpPort");
    factory.setPort(ftpPort);

    // replace the default listener
    serverFactory.addListener("default", factory.createListener());
    serverFactory.getFtplets().put("default", new VfsFtplet());
    serverFactory.setUserManager(new VfsFtpUserManager());

    server = serverFactory.createServer();
    // start the server
    try {
        server.start();
    } catch (FtpException e) {
        logger.error("Could not start Mina FTP Server", e);
        // FIX: 'throw' added. Throwables.propagate() always throws, so behavior is
        // unchanged, but the explicit keyword makes the abort visible to readers
        // and to the compiler's flow analysis.
        throw Throwables.propagate(e);
    }
}

From source file:co.cask.cdap.master.startup.MasterStartupTool.java

/**
 * Logs the versions of the Hadoop-ecosystem dependencies and the CDAP build, then —
 * unless startup checks are disabled via configuration — logs in as the CDAP master
 * user and runs the pre-flight checks, exiting with status 1 if the master cannot start.
 *
 * @param args unused
 */
public static void main(String[] args) {
    final CConfiguration conf = CConfiguration.create();

    ConfigurationLogger.logImportantConfig(conf);

    // Report the version of every subsystem we depend on.
    LOG.info("Hadoop subsystem versions:");
    LOG.info("  Hadoop version: {}", ClientVersions.getHadoopVersion());
    LOG.info("  HBase version: {}", ClientVersions.getHBaseVersion());
    LOG.info("  ZooKeeper version: {}", ClientVersions.getZooKeeperVersion());
    LOG.info("  Kafka version: {}", ClientVersions.getKafkaVersion());
    if (conf.getBoolean(Constants.Explore.EXPLORE_ENABLED)) {
        LOG.info("  Hive version: {}", ExploreServiceUtils.getHiveVersion());
    }
    LOG.info("CDAP version: {}", ClientVersions.getCdapVersion());
    LOG.info("CDAP HBase compat version: {}", ClientVersions.getCdapHBaseCompatVersion());
    LOG.info("Tephra HBase compat version: {}", ClientVersions.getTephraHBaseCompatVersion());

    // Startup checks can be switched off entirely; in that case we are done.
    final boolean checksEnabled = conf.getBoolean(Constants.Startup.CHECKS_ENABLED);
    if (!checksEnabled) {
        return;
    }

    try {
        SecurityUtil.loginForMasterService(conf);
    } catch (Exception e) {
        LOG.error("Failed to login as CDAP user", e);
        throw Throwables.propagate(e);
    }

    final Configuration hbaseConf = HBaseConfiguration.create();

    final MasterStartupTool startupTool = new MasterStartupTool(createInjector(conf, hbaseConf));
    if (!startupTool.canStartMaster()) {
        System.exit(1);
    }
}

From source file:com.zulily.omicron.Main.java

/**
 * Omicron entry point: a cron-like scheduler. Prints help when "?" is passed,
 * otherwise loads the configuration (from the optional first argument or the
 * default path) and loops forever, firing the job manager once per calendar
 * minute while watching for configuration/crontab changes between ticks.
 *
 * @param args optional single argument: configuration file path; "?" prints help
 */
public static void main(final String[] args) {

    if (args == null || (args.length > 0 && args[0].contains("?"))) {
        printHelp();
        System.exit(0);
    }

    // see doc for java.util.logging.SimpleFormatter
    // log output will look like:
    // [Tue Dec 16 10:29:07 PST 2014] INFO: <message>
    System.setProperty("java.util.logging.SimpleFormatter.format", DEFAULT_LOG_FORMAT);

    try {

        Configuration configuration = new Configuration(args.length > 0 ? args[0].trim() : DEFAULT_CONFIG_PATH);

        Crontab crontab = new Crontab(configuration);

        final JobManager jobManager = new JobManager(configuration, crontab);

        // The minute logic is intended to stay calibrated
        // with the current calendar minute.
        // Scheduled jobs should run as close to second-of-minute == 0 as possible
        // while minimizing acquired execution drift over time, or possible hangups
        // from scheduling to the "next calendar time of hh:mm:ss" considering DST or leap-seconds, etc.

        long targetExecuteMinute = getTargetMinuteMillisFromNow(1);

        // We're going to loop forever until the process is killed explicitly - please stop the warnings
        //noinspection InfiniteLoopStatement
        while (true) {

            long currentExecuteMinute = getTargetMinuteMillisFromNow(0);

            // We want to trigger tasks when the execute minute comes up or is past-due
            // so we use < instead of == to set it off "fuzzily."
            // Until then watch for crontab/conf changes or sleep
            while (currentExecuteMinute < targetExecuteMinute) {

                if (configurationUpdated(crontab, configuration)) {

                    info("Either configuration or crontab updated. Reloading task configurations.");

                    configuration = configuration.reload();
                    crontab = new Crontab(configuration);

                    jobManager.updateConfiguration(configuration, crontab);
                }

                try {

                    Thread.sleep(TimeUnit.SECONDS.toMillis(1));

                } catch (InterruptedException e) {
                    // FIX: restore the thread's interrupt status before converting to
                    // unchecked, so the outer handler/shutdown logic can observe it.
                    Thread.currentThread().interrupt();
                    throw Throwables.propagate(e);
                }

                currentExecuteMinute = getTargetMinuteMillisFromNow(0);

            }

            // Due to drift, initial start time, or due to the length of
            // time it takes to read crontab/config changes,
            // we may actually pass a target calendar minute without evaluation
            // of the scheduled task list
            //
            // The current implementation of crond never evaluates the current minute
            // that it detects schedule changes. For now I'm calling that
            // particular case "expected behavior" and just warning
            if (currentExecuteMinute != targetExecuteMinute) {
                warn("Scheduled tasks may have been missed due to missed minute target {0}",
                        String.valueOf(targetExecuteMinute));
            }

            // Set for re-evaluation in the next calendar minute
            targetExecuteMinute = getTargetMinuteMillisFromNow(1);

            jobManager.run();
        }

    } catch (Exception e) {
        error("Caught exception in primary thread:\n{0}\n", Throwables.getStackTraceAsString(e));
        System.exit(1);
    }

    System.exit(0);
}

From source file:com.treasuredata.client.Example.java

/**
 * Treasure Data client example: lists databases and tables, submits a Presto query,
 * polls with exponential back-off until the job finishes, prints the job logs, and
 * streams the msgpack.gz result, printing and counting each row.
 *
 * @param args unused
 */
public static void main(String[] args) {
    TDClient client = TDClient.newClient();
    try {
        // Retrieve database and table names
        List<TDDatabase> databases = client.listDatabases();
        TDDatabase db = databases.get(0);
        System.out.println("database: " + db.getName());
        for (TDTable table : client.listTables(db.getName())) {
            System.out.println(" table: " + table);
        }

        // Submit a new Presto query
        String jobId = client
                .submit(TDJobRequest.newPrestoQuery("sample_datasets", "select count(1) cnt from www_access"));

        // Wait until the query finishes, sleeping progressively longer between polls
        ExponentialBackOff backOff = new ExponentialBackOff();
        TDJobSummary job = client.jobStatus(jobId);
        while (!job.getStatus().isFinished()) {
            Thread.sleep(backOff.nextWaitTimeMillis());
            job = client.jobStatus(jobId);
        }

        // Read the detailed job information
        TDJob jobInfo = client.jobInfo(jobId);
        System.out.println("log:\n" + jobInfo.getCmdOut());
        System.out.println("error log:\n" + jobInfo.getStdErr());

        // Read the job results in msgpack.gz format
        client.jobResult(jobId, TDResultFormat.MESSAGE_PACK_GZ, new Function<InputStream, Integer>() {
            @Override
            public Integer apply(InputStream input) {
                int count = 0;
                try {
                    MessageUnpacker unpacker = MessagePack.newDefaultUnpacker(new GZIPInputStream(input));
                    while (unpacker.hasNext()) {
                        // Each row of the query result is array type value (e.g., [1, "name", ...])
                        ArrayValue array = unpacker.unpackValue().asArrayValue();
                        System.out.println(array);
                        count++;
                    }
                    unpacker.close();
                } catch (Exception e) {
                    // Wrap checked exceptions (I/O, msgpack) as unchecked for the callback.
                    throw Throwables.propagate(e);
                }
                return count;
            }
        });
    } catch (Exception e) {
        // Best-effort example: print the failure and fall through to close the client.
        e.printStackTrace();
    } finally {
        client.close();
    }
}

From source file:com.arpnetworking.clusteraggregator.Main.java

/**
 * Entry point for the cluster-aggregator daemon.
 *
 * Installs uncaught-exception handlers, registers the shutdown hook, loads a
 * dynamic configuration from the file named by the single command-line argument,
 * launches it, and blocks until shutdown is signalled via the semaphore.
 *
 * @param args command line arguments; exactly one element, the configuration file path
 */
public static void main(final String[] args) {
    Thread.setDefaultUncaughtExceptionHandler((thread, throwable) -> {
        LOGGER.error().setMessage("Unhandled exception!").setThrowable(throwable).log();
    });

    Thread.currentThread().setUncaughtExceptionHandler((thread, throwable) -> {
        LOGGER.error().setMessage("Unhandled exception!").setThrowable(throwable).log();
    });

    LOGGER.info().setMessage("Launching cluster-aggregator").log();

    Runtime.getRuntime().addShutdownHook(SHUTDOWN_THREAD);

    if (args.length != 1) {
        throw new RuntimeException("No configuration file specified");
    }

    LOGGER.debug().setMessage("Loading configuration from file").addData("file", args[0]).log();

    Optional<DynamicConfiguration> configuration = Optional.absent();
    Optional<Configurator<Main, ClusterAggregatorConfiguration>> configurator = Optional.absent();
    try {
        final File configurationFile = new File(args[0]);
        configurator = Optional.of(new Configurator<>(Main::new, ClusterAggregatorConfiguration.class));
        final ObjectMapper objectMapper = ClusterAggregatorConfiguration.createObjectMapper();
        configuration = Optional.of(new DynamicConfiguration.Builder().setObjectMapper(objectMapper)
                .addSourceBuilder(new JsonNodeFileSource.Builder().setObjectMapper(objectMapper)
                        .setFile(configurationFile))
                .addTrigger(new FileTrigger.Builder().setFile(configurationFile).build())
                .addListener(configurator.get()).build());

        configuration.get().launch();

        // Wait for application shutdown
        SHUTDOWN_SEMAPHORE.acquire();
    } catch (final InterruptedException e) {
        // FIX: preserve the thread's interrupt status before rethrowing as unchecked.
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
    } finally {
        if (configurator.isPresent()) {
            configurator.get().shutdown();
        }
        if (configuration.isPresent()) {
            configuration.get().shutdown();
        }
        // Notify the shutdown that we're done
        SHUTDOWN_SEMAPHORE.release();
    }
}

From source file:org.apache.cassandra.tools.SSTableOfflineRelevel.java

/**
 * Offline tool to reset the leveled-compaction level metadata of a table's sstables.
 * Must be run while Cassandra is stopped.
 *
 * @param args optional "--dry-run" flag followed by the keyspace and column family names
 * @throws IOException if sstable metadata cannot be read or rewritten
 */
public static void main(String[] args) throws IOException {
    PrintStream out = System.out;
    if (args.length < 2) {
        out.println("This command should be run with Cassandra stopped!");
        out.println("Usage: sstableofflinerelevel [--dry-run] <keyspace> <columnfamily>");
        System.exit(1);
    }

    Util.initDatabaseDescriptor();

    boolean dryRun = args[0].equals("--dry-run");
    // Keyspace and column family are always the last two arguments, whether or not
    // --dry-run was supplied.
    String keyspace = args[args.length - 2];
    String columnfamily = args[args.length - 1];
    Schema.instance.loadFromDisk(false);

    if (Schema.instance.getCFMetaData(keyspace, columnfamily) == null)
        throw new IllegalArgumentException(
                String.format("Unknown keyspace/columnFamily %s.%s", keyspace, columnfamily));

    Keyspace ks = Keyspace.openWithoutSSTables(keyspace);
    ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnfamily);
    Directories.SSTableLister lister = cfs.directories.sstableLister().skipTemporary(true);
    Set<SSTableReader> sstables = new HashSet<>();
    for (Map.Entry<Descriptor, Set<Component>> sstable : lister.list().entrySet()) {
        if (sstable.getKey() != null) {
            try {
                SSTableReader reader = SSTableReader.open(sstable.getKey());
                sstables.add(reader);
            } catch (Throwable t) {
                out.println("Couldn't open sstable: " + sstable.getKey().filenameFor(Component.DATA));
                // FIX: 'throw' added. propagate() always throws, so the tool already
                // aborted on the first unreadable sstable; the keyword makes that
                // control flow explicit instead of looking like a skipped file.
                throw Throwables.propagate(t);
            }
        }
    }
    if (sstables.isEmpty()) {
        out.println("No sstables to relevel for " + keyspace + "." + columnfamily);
        System.exit(1);
    }
    Relevel rl = new Relevel(sstables);
    rl.relevel(dryRun);
    System.exit(0);

}

From source file:com.arpnetworking.metrics.mad.Main.java

/**
 * Entry point for Metrics Aggregator Daemon (MAD).
 *
 * Installs uncaught-exception handlers, registers the shutdown hook, loads a
 * dynamic configuration from the file named by the single command-line argument,
 * launches it, and blocks until shutdown is signalled via the semaphore.
 *
 * @param args the command line arguments; exactly one element, the configuration file path
 */
public static void main(final String[] args) {
    // Global initialization
    Thread.setDefaultUncaughtExceptionHandler((thread, throwable) -> {
        System.err.println("Unhandled exception! exception: " + throwable.toString());
        throwable.printStackTrace(System.err);
    });

    Thread.currentThread().setUncaughtExceptionHandler((thread, throwable) -> LOGGER.error()
            .setMessage("Unhandled exception!").setThrowable(throwable).log());

    LOGGER.info().setMessage("Launching mad").log();

    Runtime.getRuntime().addShutdownHook(SHUTDOWN_THREAD);

    System.setProperty("org.vertx.logger-delegate-factory-class-name",
            "org.vertx.java.core.logging.impl.SLF4JLogDelegateFactory");

    // Run the tsd aggregator
    if (args.length != 1) {
        throw new RuntimeException("No configuration file specified");
    }
    LOGGER.debug().setMessage("Loading configuration").addData("file", args[0]).log();

    Optional<DynamicConfiguration> configuration = Optional.empty();
    Optional<Configurator<Main, AggregatorConfiguration>> configurator = Optional.empty();
    try {
        final File configurationFile = new File(args[0]);
        configurator = Optional.of(new Configurator<>(Main::new, AggregatorConfiguration.class));
        configuration = Optional.of(new DynamicConfiguration.Builder().setObjectMapper(OBJECT_MAPPER)
                .addSourceBuilder(getFileSourceBuilder(configurationFile))
                .addTrigger(new FileTrigger.Builder().setFile(configurationFile).build())
                .addListener(configurator.get()).build());

        configuration.get().launch();
        // Wait for application shutdown
        SHUTDOWN_SEMAPHORE.acquire();
    } catch (final InterruptedException e) {
        // FIX: preserve the thread's interrupt status before rethrowing as unchecked.
        Thread.currentThread().interrupt();
        throw Throwables.propagate(e);
    } finally {
        if (configurator.isPresent()) {
            configurator.get().shutdown();
        }
        if (configuration.isPresent()) {
            configuration.get().shutdown();
        }
        // Notify the shutdown that we're done
        SHUTDOWN_SEMAPHORE.release();
    }
}

From source file:org.voltdb.utils.Collector.java

/**
 * Collects diagnostic artifacts from a VoltDB installation.
 *
 * Positional arguments: voltdbroot path, unique id, host, username, password,
 * no-prompt flag, dry-run flag, no-heap-dump flag, and optionally (when invoked
 * from VEM) called-from-VEM and file-info-only flags.
 *
 * @param args positional arguments as described above
 */
public static void main(String[] args) {
    m_voltDbRootPath = args[0];
    m_uniqueid = args[1];
    m_host = args[2];
    m_username = args[3];
    m_password = args[4];
    m_noPrompt = Boolean.parseBoolean(args[5]);
    m_dryRun = Boolean.parseBoolean(args[6]);
    m_noHeapDump = Boolean.parseBoolean(args[7]);

    // arguments only used when Collector is called from VEM
    if (args.length > 8) {
        // generate resulting file in voltdbroot instead of current working dir and do not append timestamp in filename
        // so the resulting file is easier to be located and copied to VEM
        m_calledFromVEM = Boolean.parseBoolean(args[8]);

        // generate a list of information (server name, size, and path) of files rather than actually collect files
        // used by files display panel in VEM UI
        m_fileInfoOnly = Boolean.parseBoolean(args[9]);
    }

    File voltDbRoot = new File(m_voltDbRootPath);
    if (!voltDbRoot.exists()) {
        System.err.println("voltdbroot path '" + m_voltDbRootPath + "' does not exist.");
        System.exit(-1);
    }

    locatePaths(m_voltDbRootPath);

    JSONObject jsonObject = parseJSONFile(m_configInfoPath);
    parseJSONObject(jsonObject);

    List<String> collectionFilesList = listCollection(m_noHeapDump);

    if (m_dryRun) {
        System.out.println("List of the files to be collected:");
        for (String path : collectionFilesList) {
            System.out.println("  " + path);
        }
        System.out.println("[dry-run] A tgz file containing above files would be generated in current dir");
        System.out.println("          Use --upload option to enable uploading via SFTP");
    } else if (m_fileInfoOnly) {
        String collectionFilesListPath = m_voltDbRootPath + File.separator + m_uniqueid;

        byte jsonBytes[] = null;
        try {
            JSONStringer stringer = new JSONStringer();

            stringer.object();
            stringer.key("server").value(m_uniqueid);
            stringer.key("files").array();
            for (String path : collectionFilesList) {
                stringer.object();
                stringer.key("filename").value(path);
                // Command pseudo-entries (matched by their first token) have no on-disk size.
                if (Arrays.asList(cmdFilenames).contains(path.split(" ")[0])) {
                    stringer.key("size").value(0);
                } else {
                    stringer.key("size").value(new File(path).length());
                }
                stringer.endObject();
            }
            stringer.endArray();
            stringer.endObject();

            JSONObject jsObj = new JSONObject(stringer.toString());
            jsonBytes = jsObj.toString(4).getBytes(Charsets.UTF_8);
        } catch (JSONException e) {
            // FIX: 'throw' added for clarity; propagate() always throws, which is also
            // why jsonBytes can never be null when we reach the write below.
            throw Throwables.propagate(e);
        }

        // FIX: try-with-resources replaces the original try/finally, whose finally
        // block called fos.close() on a possibly-null reference (NPE when the
        // FileOutputStream constructor itself failed).
        try (FileOutputStream fos = new FileOutputStream(collectionFilesListPath)) {
            fos.write(jsonBytes);
            fos.getFD().sync();
        } catch (IOException e) {
            throw Throwables.propagate(e);
        }
    } else {
        generateCollection(collectionFilesList, m_calledFromVEM);
    }
}

From source file:com.wamad.IRCConnectionExample.java

/**
 * IRC client example: connects to irc.freenode.net, attaches a listener that echoes
 * every IRC event (with elapsed time since listener creation) to stdout, joins the
 * ##gibson channel, and then sends messages in an infinite loop every 5 seconds.
 *
 * @param args unused
 * @throws InterruptedException if the send-loop sleep is interrupted
 */
public static void main(String[] args) throws InterruptedException {
    Stopwatch timer = new Stopwatch().start();
    IRCConnection conn = new IRCConnection("irc.freenode.net", 6667, 6669, null, "Foo-bot", "Mr-Foobar",
            "foo@bar.com");

    final PrintStream out = System.out;

    // Log every IRC event along with the milliseconds elapsed on this stopwatch.
    conn.addIRCEventListener(new IRCEventListener() {
        private Stopwatch stopwatch = new Stopwatch().start();

        public void onRegistered() {
            out.println("Registered");
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onDisconnected() {
            out.println("Disconnected");
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onError(final String msg) {
            out.println("Error: " + msg);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onError(final int num, final String msg) {
            out.println("Error: num (" + num + "), msg (" + msg + ")");
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onInvite(final String chan, final IRCUser user, final String passiveNick) {
            out.println("invite: chan (" + chan + "), user(" + user + "), pass(" + passiveNick + ")");
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onJoin(final String chan, final IRCUser user) {
            out.println("join - " + chan + ", " + user);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onKick(final String chan, final IRCUser user, final String passiveNick, final String msg) {
            out.println("kick - " + chan + ", " + user + ", " + passiveNick + ", " + msg);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onMode(final String chan, final IRCUser user, final IRCModeParser modeParser) {
            out.println("mode - " + chan + ", " + user + ", " + modeParser);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onMode(final IRCUser user, final String passiveNick, final String mode) {
            out.println("mode - " + user + ", " + passiveNick + ", " + mode);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onNick(final IRCUser user, final String newNick) {
            out.println("Nikc - " + user + ", " + newNick);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onNotice(final String target, final IRCUser user, final String msg) {
            out.println("notice - " + target + ", " + user + ", " + msg);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onPart(final String chan, final IRCUser user, final String msg) {
            out.println("part - " + chan + ", " + user + ", " + msg);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onPing(final String ping) {
            out.println("ping - " + ping);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onPrivmsg(final String target, final IRCUser user, final String msg) {
            out.println("privmsg - " + target + ", " + user + ", " + msg);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onQuit(final IRCUser user, final String msg) {
            out.println("quit - " + user + ", " + msg);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onReply(final int num, final String value, final String msg) {
            out.println("reply - " + num + ", " + value + ", " + msg);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void onTopic(final String chan, final IRCUser user, final String topic) {
            out.println("topic - " + chan + ", " + user + ", " + topic);
            out.println("time (" + stopwatch.elapsedMillis());
        }

        public void unknown(final String prefix, final String command, final String middle,
                final String trailing) {
            out.println("unknown - " + prefix + ", " + command + ", " + middle + ", " + trailing);
            out.println("time (" + stopwatch.elapsedMillis());
        }
    });
    conn.setDaemon(true);
    conn.setColors(false);
    conn.setPong(true);

    try {
        out.println("Connecting");
        conn.connect();
        out.println("Connected?");

        conn.doJoin("##gibson");

        System.out.println("Time taken: " + timer.elapsedMillis());

        // Infinite send loop: the process runs until killed externally.
        while (true) {
            conn.doPrivmsg("the_minh_net", "YOU ARE THE MINH");
            conn.doPrivmsg("##gibson", "Testing");
            TimeUnit.SECONDS.sleep(5);
        }

        //      conn.run();
    } catch (IOException e) {
        // NOTE(review): propagate() always throws, but an explicit 'throw' keyword
        // would make that clearer — left unchanged here.
        Throwables.propagate(e);
    }
}