Example usage for org.apache.commons.lang StringUtils leftPad

Introduction

This page collects example usages of org.apache.commons.lang StringUtils leftPad from open-source projects.

Prototype

public static String leftPad(String str, int size) 

Source Link

Document

Left pad a String with spaces (' ').
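
Before the project examples below, a minimal standalone sketch of the method's behavior (the results shown follow the commons-lang javadoc):

StringUtils.leftPad("bat", 5);  // "  bat"  (padded with spaces to size 5)
StringUtils.leftPad("bat", 3);  // "bat"    (already at size)
StringUtils.leftPad("bat", 1);  // "bat"    (never truncates)
StringUtils.leftPad(null, 5);   // null     (null-safe)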

Usage

From source file:org.apache.cocoon.util.log.ExtensiblePatternFormatter.java

/**
 * Utility to append a string to buffer given certain constraints.
 *
 * @param sb the StringBuffer to append to
 * @param minSize the minimum size of the output (0 to ignore)
 * @param maxSize the maximum size of the output (0 to ignore)
 * @param rightJustify true if the string is to be right justified in its box
 * @param output the string to append
 */
protected void append(final StringBuffer sb, final int minSize, final int maxSize, final boolean rightJustify,
        final String output) {
    if (output.length() < minSize) {
        if (rightJustify) {
            sb.append(StringUtils.leftPad(output, minSize));
        } else {
            sb.append(StringUtils.rightPad(output, minSize));
        }
    } else if (maxSize > 0) {
        if (rightJustify) {
            sb.append(StringUtils.right(output, maxSize));
        } else {
            sb.append(StringUtils.left(output, maxSize));
        }
    } else {
        sb.append(output);
    }
}
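
A hedged illustration of how this utility behaves; the call site is hypothetical (append is protected, so it would run inside a subclass), and only the method above comes from the source:

StringBuffer sb = new StringBuffer();
// Shorter than minSize: padded out to 8 characters.
append(sb, 8, 0, true, "WARN");       // appends "    WARN" (leftPad)
append(sb, 8, 0, false, "WARN");      // appends "WARN    " (rightPad)
// Longer than maxSize: truncated to 4 characters.
append(sb, 0, 4, true, "CRITICAL");   // appends "ICAL" (StringUtils.right)
append(sb, 0, 4, false, "CRITICAL");  // appends "CRIT" (StringUtils.left)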

From source file:org.apache.hadoop.hbase.test.MultiThreadedMultiClusterWithCmApiTest.java

public static void main(String[] args) throws Exception {
    if (args.length == 0) {

        System.out.println("RunMultiClusterTest " + "<CM-Host-1> " + "<UserName> " + "<Password> "
                + "<Cluster-1> " + "<HBase-Service-1> " + "<CM-Host-2> " + "<UserName-2> " + "<Password-2> "
                + "<Cluster-2> " + "<HBase-Service-2> " + "<tableName> " + "<familyName> " + "<numberOfPuts> "
                + "<millisecond of wait> " + "<numberOfThreads> " + "<outputCsvFile>");
        return;
    }

    final String cmHost1 = args[0];
    final String username1 = args[1];
    final String password1 = args[2];
    final String cluster1 = args[3];
    final String hbaseService1 = args[4];
    final String cmHost2 = args[5];
    final String username2 = args[6];
    final String password2 = args[7];
    final String cluster2 = args[8];
    final String hbaseService2 = args[9];

    LOG.info("--Getting Configurations");

    Configuration config = HBaseMultiClusterConfigUtil.combineConfigurations(cmHost1, username1, password1,
            cluster1, hbaseService1, cmHost2, username2, password2, cluster2, hbaseService2);

    LOG.info("--Got Configuration");

    final String tableName = args[10];
    final String familyName = args[11];
    final int numberOfPuts = Integer.parseInt(args[12]);
    final int millisecondToWait = Integer.parseInt(args[13]);
    final int numberOfThreads = Integer.parseInt(args[14]);
    final String outputCsvFile = args[15];

    LOG.info("Getting HAdmin");

    LOG.info(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG + ": "
            + config.get(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG));
    LOG.info("hbase.zookeeper.quorum: " + config.get("hbase.zookeeper.quorum"));
    LOG.info("hbase.failover.cluster.fail1.hbase.hstore.compaction.max: "
            + config.get("hbase.failover.cluster.fail1.hbase.hstore.compaction.max"));

    HBaseAdmin admin = new HBaseAdminMultiCluster(config);

    try {
        if (admin.tableExists(TableName.valueOf(tableName))) {
            try {
                admin.disableTable(TableName.valueOf(tableName));
            } catch (Exception e) {
                // ignore; the table may already be disabled
            }
            admin.deleteTable(TableName.valueOf(tableName));
        }
    } catch (Exception e) {
        e.printStackTrace();
    }

    LOG.info(" - Got HAdmin:" + admin.getClass());

    HTableDescriptor tableD = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor columnD = new HColumnDescriptor(Bytes.toBytes(familyName));
    tableD.addFamily(columnD);

    byte[][] splitKeys = new byte[10][1];
    // One split point per leading key digit '0'..'9'.
    for (int i = 0; i < 10; i++) {
        splitKeys[i][0] = (byte) ('0' + i);
    }

    LOG.info(" - About to create Table " + tableD.getName());

    admin.createTable(tableD, splitKeys);

    LOG.info(" - Created Table " + tableD.getName());

    LOG.info("Getting HConnection");

    config.set("hbase.client.retries.number", "1");
    config.set("hbase.client.pause", "1");

    final HConnection connection = HConnectionManagerMultiClusterWrapper.createConnection(config);

    LOG.info(" - Got HConnection: " + connection.getClass());

    LOG.info("Getting HTable");

    final AtomicInteger threadFinishCounter = new AtomicInteger(0);

    // Make sure the output folder exists
    File outputFolder = new File(outputCsvFile);
    if (!outputFolder.exists()) {
        outputFolder.mkdirs();
    }

    for (int threadNum = 0; threadNum < numberOfThreads; threadNum++) {

        final BufferedWriter writer = new BufferedWriter(
                new FileWriter(new File(outputCsvFile + "/thread-" + threadNum + ".csv")));

        final int threadFinalNum = threadNum;

        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Random r = new Random();
                    HTableInterface table = connection.getTable(tableName);
                    HTableStats stats = ((HTableMultiCluster) table).getStats();
                    stats.printStats(writer, 5000);

                    for (int i = 1; i <= numberOfPuts; i++) {

                        int hash = r.nextInt(10);

                        Put put = new Put(Bytes.toBytes(hash + ".key." + i + "."
                                + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        put.add(Bytes.toBytes(familyName), Bytes.toBytes("C"),
                                Bytes.toBytes("Value:" + i * threadFinalNum));
                        table.put(put);

                        Thread.sleep(millisecondToWait);

                        // The Get key must match the Put key above, including the "i." segment.
                        Get get = new Get(Bytes.toBytes(hash + ".key." + i + "."
                                + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        table.get(get);

                        Thread.sleep(millisecondToWait);

                        //Delete delete = new Delete(Bytes.toBytes(hash + ".key." + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        //table.delete(delete);

                        Thread.sleep(millisecondToWait);

                        if (i % 10 == 0) {
                            writeToSystemOut("{thread:" + threadFinalNum + ",count=" + i + "}", true);
                        } else if (i % 1000 == 0) {
                            writeToSystemOut(".", false);
                        }
                    }
                    stats.stopPrintingStats();
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    threadFinishCounter.incrementAndGet();
                    try {
                        writer.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }

            }
        });
        t.start();
    }

    while (threadFinishCounter.get() < numberOfThreads) {
        Thread.sleep(millisecondToWait * 10);
    }

    //admin.disableTable(TableName.valueOf(tableName));
    //admin.deleteTable(TableName.valueOf(tableName));

    System.out.println("close connection");
    connection.close();
    System.out.println("close admin");
    admin.close();
    System.out.println("done");
    System.exit(0);
}
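
A note on the row keys in this test: left-padding the counter with spaces to a fixed width of 12 makes the lexicographic byte order of the keys agree with the numeric order of the counter (a space sorts before every digit), so padded values compare the same way the numbers do. A small sketch of one key; the hash, i, and threadFinalNum values are made up for illustration:

// With hash = 3, i = 7, threadFinalNum = 2 the key is "3.key.7." followed by
// ten spaces and "14" (the value 14 right-aligned in a 12-character field).
String key = 3 + ".key." + 7 + "." + StringUtils.leftPad(String.valueOf(7 * 2), 12);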

From source file:org.apache.hadoop.hbase.test.MultiThreadedMultiClusterWithCombinedFileTest.java

public static void main(String[] args) throws Exception {
    if (args.length == 0) {

        System.out.println("RunMultiClusterTest " + "<combined file> " + "<tableName> " + "<familyName> "
                + "<numberOfPuts> " + "<millisecond of wait> " + "<numberOfThreads> " + "<outputCsvFile>");
        return;
    }

    final String combinedFilePath = args[0];

    System.out.println("--Getting Configurations");

    Configuration config = HBaseConfiguration.create();
    config.addResource(new FileInputStream(combinedFilePath));

    System.out.println("--Got Configuration");

    final String tableName = args[1];
    final String familyName = args[2];
    final int numberOfPuts = Integer.parseInt(args[3]);
    final int millisecondToWait = Integer.parseInt(args[4]);
    final int numberOfThreads = Integer.parseInt(args[5]);
    final String outputCsvFile = args[6];

    System.out.println("Getting HAdmin");

    System.out.println(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG + ": "
            + config.get(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG));
    System.out.println("hbase.zookeeper.quorum: " + config.get("hbase.zookeeper.quorum"));
    System.out.println("hbase.failover.cluster.fail1.hbase.hstore.compaction.max: "
            + config.get("hbase.failover.cluster.fail1.hbase.hstore.compaction.max"));

    HBaseAdmin admin = new HBaseAdminMultiCluster(config);

    try {
        admin.disableTable(TableName.valueOf(tableName));
        admin.deleteTable(TableName.valueOf(tableName));
    } catch (Exception e) {
        e.printStackTrace();
    }

    System.out.println(" - Got HAdmin:" + admin.getClass());

    HTableDescriptor tableD = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor columnD = new HColumnDescriptor(Bytes.toBytes(familyName));
    tableD.addFamily(columnD);

    byte[][] splitKeys = new byte[10][1];
    // One split point per leading key digit '0'..'9'.
    for (int i = 0; i < 10; i++) {
        splitKeys[i][0] = (byte) ('0' + i);
    }

    admin.createTable(tableD, splitKeys);

    System.out.println("Getting HConnection");

    config.set("hbase.client.retries.number", "1");
    config.set("hbase.client.pause", "1");

    final HConnection connection = HConnectionManagerMultiClusterWrapper.createConnection(config);

    System.out.println(" - Got HConnection: " + connection.getClass());

    System.out.println("Getting HTable");

    final AtomicInteger threadFinishCounter = new AtomicInteger(0);

    for (int threadNum = 0; threadNum < numberOfThreads; threadNum++) {

        // Note: unlike the CM API test above, this assumes the outputCsvFile directory already exists.
        final BufferedWriter writer = new BufferedWriter(
                new FileWriter(outputCsvFile + "/thread-" + threadNum + ".csv"));

        final int threadFinalNum = threadNum;

        Thread t = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    Random r = new Random();
                    for (int i = 1; i <= numberOfPuts; i++) {
                        HTableInterface table = connection.getTable(tableName);
                        HTableStats stats = ((HTableMultiCluster) table).getStats();
                        stats.printStats(writer, 5000);

                        int hash = r.nextInt(10);

                        Put put = new Put(Bytes.toBytes(
                                hash + ".key." + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        put.add(Bytes.toBytes(familyName), Bytes.toBytes("C"),
                                Bytes.toBytes("Value:" + i * threadFinalNum));
                        table.put(put);

                        Thread.sleep(millisecondToWait);

                        Get get = new Get(Bytes.toBytes(
                                hash + ".key." + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        table.get(get);

                        Thread.sleep(millisecondToWait);

                        Delete delete = new Delete(Bytes.toBytes(
                                hash + ".key." + StringUtils.leftPad(String.valueOf(i * threadFinalNum), 12)));
                        table.delete(delete);

                        Thread.sleep(millisecondToWait);

                        if (i % 10000 == 0) {
                            writeToSystemOut("{thread:" + threadFinalNum + ",count=" + i + "}", true);
                        } else if (i % 1000 == 0) {
                            writeToSystemOut(".", false);
                        }
                    }
                } catch (Exception e) {
                    e.printStackTrace();
                } finally {
                    threadFinishCounter.incrementAndGet();
                    try {
                        writer.close();
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                }
            }
        });
        t.start();
    }

    while (threadFinishCounter.get() < numberOfThreads) {
        Thread.sleep(millisecondToWait * 10);
    }

    admin.disableTable(TableName.valueOf(tableName));
    admin.deleteTable(TableName.valueOf(tableName));

    connection.close();
    admin.close();
}

From source file:org.apache.hadoop.hbase.test.RunMultiClusterTest.java

public static void main(String[] args) throws Exception {
    if (args.length == 0) {
        System.out.println(
                "RunMultiClusterTest <core-site file> <hbase-site file> <hdfs-site file> <tableName> <familyName> <numberOfPuts> <millisecond of wait> <outputCsvFile>");
        return;
    }

    Configuration config = HBaseConfiguration.create();
    config.addResource(new FileInputStream(new File(args[0])));
    config.addResource(new FileInputStream(new File(args[1])));
    config.addResource(new FileInputStream(new File(args[2])));

    String tableName = args[3];
    String familyName = args[4];
    int numberOfPuts = Integer.parseInt(args[5]);
    int millisecondsToWait = Integer.parseInt(args[6]);
    String outputCsvFile = args[7];

    System.out.println("Getting HAdmin");

    System.out.println(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG + ": "
            + config.get(ConfigConst.HBASE_FAILOVER_CLUSTERS_CONFIG));
    System.out.println("hbase.zookeeper.quorum: " + config.get("hbase.zookeeper.quorum"));
    System.out.println("hbase.failover.cluster.fail1.hbase.hstore.compaction.max: "
            + config.get("hbase.failover.cluster.fail1.hbase.hstore.compaction.max"));

    HBaseAdmin admin = new HBaseAdminMultiCluster(config);

    try {
        admin.disableTable(TableName.valueOf(tableName));
        admin.deleteTable(TableName.valueOf(tableName));
    } catch (Exception e) {
        e.printStackTrace();
    }

    System.out.println(" - Got HAdmin:" + admin.getClass());

    HTableDescriptor tableD = new HTableDescriptor(TableName.valueOf(tableName));
    HColumnDescriptor columnD = new HColumnDescriptor(Bytes.toBytes(familyName));
    tableD.addFamily(columnD);

    byte[][] splitKeys = new byte[10][1];
    // One split point per leading key digit '0'..'9'.
    for (int i = 0; i < 10; i++) {
        splitKeys[i][0] = (byte) ('0' + i);
    }

    admin.createTable(tableD, splitKeys);

    System.out.println("Getting HConnection");

    config.set("hbase.client.retries.number", "1");
    config.set("hbase.client.pause", "1");

    HConnection connection = HConnectionManagerMultiClusterWrapper.createConnection(config);

    System.out.println(" - Got HConnection: " + connection.getClass());

    System.out.println("Getting HTable");

    HTableInterface table = connection.getTable(tableName);

    System.out.println("Got HTable: " + table.getClass());

    BufferedWriter writer = new BufferedWriter(new FileWriter(outputCsvFile));

    HTableStats.printCSVHeaders(writer);

    for (int i = 1; i <= numberOfPuts; i++) {
        System.out.print("p");
        Put put = new Put(Bytes.toBytes(i % 10 + ".key." + StringUtils.leftPad(String.valueOf(i), 12)));
        put.add(Bytes.toBytes(familyName), Bytes.toBytes("C"), Bytes.toBytes("Value:" + i));
        table.put(put);

        System.out.print("g");
        Get get = new Get(Bytes.toBytes(i % 10 + ".key." + StringUtils.leftPad(String.valueOf(i), 12)));
        table.get(get);

        System.out.print("d");
        Delete delete = new Delete(
                Bytes.toBytes(i % 10 + ".key." + StringUtils.leftPad(String.valueOf(i), 12)));
        table.delete(delete);

        System.out.print(".");
        if (i % 100 == 0) {
            System.out.println("|");
            HTableStats stats = ((HTableMultiCluster) table).getStats();
            stats.printPrettyStats();
            stats.printCSVStats(writer);
        }
        Thread.sleep(millisecondsToWait);
    }

    writer.close();

    admin.disableTable(TableName.valueOf(tableName));
    admin.deleteTable(TableName.valueOf(tableName));

    connection.close();
    admin.close();
}

From source file:org.apache.hive.jdbc.TestJdbcWithMiniHS2.java

/**
 * Test for HTTP header size
 * @throws Exception
 */
@Test
public void testHttpHeaderSize() throws Exception {
    // Stop HiveServer2
    stopMiniHS2();
    HiveConf conf = new HiveConf();
    conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE, 1024);
    conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE, 1024);
    startMiniHS2(conf, true);

    // Username is added to the request header
    String userName = StringUtils.leftPad("*", 100);
    Connection conn = null;
    // This should go fine, since header should be less than the configured header size
    try {
        conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password");
    } catch (Exception e) {
        fail("Not expecting exception: " + e);
    } finally {
        if (conn != null) {
            conn.close();
        }
    }

    // This should fail with HTTP response code 413 in the error message, since the header is
    // larger than the configured header size
    userName = StringUtils.leftPad("*", 2000);
    Exception headerException = null;
    try {
        conn = null;
        conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password");
    } catch (Exception e) {
        headerException = e;
    } finally {
        if (conn != null) {
            conn.close();
        }

        assertTrue("Header exception should be thrown", headerException != null);
        assertTrue("Incorrect HTTP Response:" + headerException.getMessage(),
                headerException.getMessage().contains("HTTP Response code: 413"));
    }

    // Stop HiveServer2 to increase header size
    stopMiniHS2();
    conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE, 3000);
    conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE, 3000);
    startMiniHS2(conf);

    // This should now go fine, since we increased the configured header size
    try {
        conn = null;
        conn = getConnection(miniHS2.getJdbcURL(testDbName), userName, "password");
    } catch (Exception e) {
        fail("Not expecting exception: " + e);
    } finally {
        if (conn != null) {
            conn.close();
        }
    }

    // Restore original state
    restoreMiniHS2AndConnections();
}

From source file:org.apache.mahout.utils.vectors.io.AbstractClusterWriter.java

public static String getTopFeatures(Vector vector, String[] dictionary, int numTerms) {

    List<TermIndexWeight> vectorTerms = Lists.newArrayList();

    Iterator<Vector.Element> iter = vector.iterateNonZero();
    while (iter.hasNext()) {
        Vector.Element elt = iter.next();
        vectorTerms.add(new TermIndexWeight(elt.index(), elt.get()));
    }

    // Sort results in reverse order (i.e. by weight, descending)
    Collections.sort(vectorTerms, new Comparator<TermIndexWeight>() {
        @Override
        public int compare(TermIndexWeight one, TermIndexWeight two) {
            return Double.compare(two.weight, one.weight);
        }
    });

    Collection<Pair<String, Double>> topTerms = new LinkedList<Pair<String, Double>>();

    for (int i = 0; i < vectorTerms.size() && i < numTerms; i++) {
        int index = vectorTerms.get(i).index;
        String dictTerm = dictionary[index];
        if (dictTerm == null) {
            log.error("Dictionary entry missing for {}", index);
            continue;
        }
        topTerms.add(new Pair<String, Double>(dictTerm, vectorTerms.get(i).weight));
    }

    StringBuilder sb = new StringBuilder(100);

    for (Pair<String, Double> item : topTerms) {
        String term = item.getFirst();
        sb.append("\n\t\t");
        sb.append(StringUtils.rightPad(term, 40));
        sb.append("=>");
        sb.append(StringUtils.leftPad(item.getSecond().toString(), 20));
    }
    return sb.toString();
}
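
The rightPad/leftPad pair above produces two fixed-width columns: terms left-aligned in a 40-character field, weights right-aligned in a 20-character field. A sketch of one rendered line, with a made-up term and weight:

// "apache" padded right to 40 characters, then "=>", then the weight padded left to 20:
String line = StringUtils.rightPad("apache", 40) + "=>" + StringUtils.leftPad("0.8231", 20);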

From source file:org.apache.oodt.cas.workflow.gui.WorkflowGUI.java

private void updateWorkspaceText() {
    if (this.workspace == null) {
        this.setTitle(null);
    } else {
        this.setTitle(StringUtils.leftPad("Workspace: " + this.workspace, 100));
    }
}

From source file:org.apache.sqoop.manager.oracle.OraOopUtilities.java

public static String padLeft(String s, int n) {
    return StringUtils.leftPad(s, n);
}
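
Because this is a straight pass-through, the wrapper inherits leftPad's edge-case behavior:

OraOopUtilities.padLeft("7", 3);    // "  7"
OraOopUtilities.padLeft("1234", 3); // "1234" (size smaller than the string: no truncation)
OraOopUtilities.padLeft(null, 3);   // null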

From source file:org.batoo.jpa.benchmark.BenchmarkTest.java

private void waitUntilFinish(ThreadPoolExecutor executor) {
    final BlockingQueue<Runnable> workQueue = executor.getQueue();
    try {
        final long started = System.currentTimeMillis();

        int lastToGo = workQueue.size();

        final int total = workQueue.size();
        int performed = 0;

        int maxStatusMessageLength = 0;
        while (!workQueue.isEmpty()) {
            final float doneNow = lastToGo - workQueue.size();
            performed += doneNow;

            final float elapsed = (System.currentTimeMillis() - started) / 1000;

            lastToGo = workQueue.size();

            if (performed > 0) {
                final float throughput = performed / elapsed;
                final float eta = ((elapsed * total) / performed) - elapsed;

                final float percentDone = (100 * (float) lastToGo) / total;
                final int gaugeDone = (int) ((100 - percentDone) / 5);
                final String gauge = "[" + StringUtils.repeat("=", gaugeDone) // completed-portion fill character; '=' is assumed here
                        + StringUtils.repeat("-", 20 - gaugeDone) + "]";

                final String sampling = this.profilingQueue.size() > 0
                        ? MessageFormat.format(" | Samples {0}", this.profilingQueue.size())
                        : "";

                if ((maxStatusMessageLength != 0) || (eta > 5)) {
                    String statusMessage = MessageFormat.format(
                            "\r{4} %{5,number,00.00} | ETA {2} | LAST TPS {0} ops / sec | AVG TPS {1,number,#.0} | LEFT {3}{6}", //
                            doneNow, throughput, this.etaToString((int) eta), workQueue.size(), gauge,
                            percentDone, sampling);

                    maxStatusMessageLength = Math.max(statusMessage.length(), maxStatusMessageLength);
                    // leftPad's second argument is the total target width, not the amount of padding.
                    statusMessage = StringUtils.leftPad(statusMessage, maxStatusMessageLength);
                    System.out.print(statusMessage);
                }
            }

            if (elapsed > BenchmarkTest.MAX_TEST_TIME) {
                throw new IllegalStateException("Max allowed test time exceeded");
            }

            Thread.sleep(1000);
        }

        if (maxStatusMessageLength > 0) {
            System.out.print("\r" + StringUtils.repeat(" ", maxStatusMessageLength) + "\r");
        }

        executor.shutdown();

        if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
            BenchmarkTest.LOG.warn("Forcefully shutting down the thread pool");

            executor.shutdownNow();
        }

        BenchmarkTest.LOG.warn("Iterations completed");
    } catch (final InterruptedException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.batoo.jpa.core.impl.criteria.QueryImpl.java

private void dumpResultSet() throws SQLException {
    final int[] lengths = new int[this.labels.length];
    for (int i = 0; i < lengths.length; i++) {
        lengths[i] = this.max(lengths[i], StringUtils.length(this.labels[i]));
    }

    for (final Object[] data : this.data) {
        for (int i = 0; i < this.labels.length; i++) {
            final Object value = data[i];
            if (value != null) {
                lengths[i] = this.max(lengths[i], StringUtils.length(value.toString()));
            }
        }
    }

    int length = 1;
    for (final int l : lengths) {
        length += l + 3;
    }

    final StringBuffer dump = new StringBuffer("Query returned {0} row(s):\n");

    // the labels
    dump.append(StringUtils.repeat("-", length));
    dump.append("\n| ");

    for (int i = 0; i < this.labels.length; i++) {
        String strValue = StringUtils.abbreviate(this.labels[i], lengths[i]);
        strValue = StringUtils.rightPad(strValue, lengths[i]);

        dump.append(strValue);
        dump.append(" | ");
    }

    // the data
    dump.append("\n");
    dump.append(StringUtils.repeat("-", length));

    for (final Object[] data : this.data) {
        dump.append("\n| ");

        for (int i = 0; i < this.labels.length; i++) {
            final Object value = data[i];

            String strValue = value != null ? value.toString() : "!NULL!";
            strValue = StringUtils.abbreviate(strValue, lengths[i]);
            if (value instanceof Number) {
                strValue = StringUtils.leftPad(strValue, lengths[i]);
            } else {
                strValue = StringUtils.rightPad(strValue, lengths[i]);
            }

            dump.append(strValue);
            dump.append(" | ");
        }

    }

    dump.append("\n");
    dump.append(StringUtils.repeat("-", length));

    QueryImpl.LOG.debug(dump.toString(), this.data.size());
}
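
The branch on Number above is what gives the dump its tabular look: numeric cells are right-aligned with leftPad, everything else is left-aligned with rightPad. A quick sketch with a made-up column width of 6:

StringUtils.leftPad("42", 6);    // "    42" (numbers flush right)
StringUtils.rightPad("name", 6); // "name  " (text flush left)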