Example usage for org.apache.commons.lang StringUtils repeat

Introduction

On this page you can find example usage for org.apache.commons.lang StringUtils repeat.

Prototype

public static String repeat(String str, int repeat) 

Document

Repeat a String repeat times to form a new String.
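
Before the usage examples, here is a minimal, self-contained sketch of the method's behavior (the wrapper class name is illustrative; the expected values follow the commons-lang Javadoc):

import org.apache.commons.lang.StringUtils;

public class RepeatSketch {
    public static void main(String[] args) {
        // Repeat a single-character string to build a separator line
        System.out.println(StringUtils.repeat("=", 10));  // "=========="

        // Repeat a multi-character string
        System.out.println(StringUtils.repeat("ab", 3));  // "ababab"

        // Edge cases documented by commons-lang: zero repeats yields "", null input yields null
        System.out.println(StringUtils.repeat("x", 0));   // ""
        System.out.println(StringUtils.repeat(null, 2));  // null
    }
}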

Usage

From source file:org.apache.hadoop.crypto.key.KeyShell.java

@Override
public String getCommandUsage() {
    StringBuffer sbuf = new StringBuffer(USAGE_PREFIX + COMMANDS);
    String banner = StringUtils.repeat("=", 66);
    sbuf.append(banner + "\n");
    sbuf.append(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC + "\n");
    sbuf.append(banner + "\n");
    sbuf.append(RollCommand.USAGE + ":\n\n" + RollCommand.DESC + "\n");
    sbuf.append(banner + "\n");
    sbuf.append(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC + "\n");
    sbuf.append(banner + "\n");
    sbuf.append(ListCommand.USAGE + ":\n\n" + ListCommand.DESC + "\n");
    return sbuf.toString();
}

From source file:org.apache.hadoop.hdfs.server.diskbalancer.command.PlanCommand.java

/**
 * Prints a quick summary of the plan to screen.
 *
 * @param plans - List of NodePlans.
 */
static private void printToScreen(List<NodePlan> plans) {
    System.out.println("\nPlan :\n");
    System.out.println(StringUtils.repeat("=", 80));

    System.out.println(StringUtils.center("Source Disk", 30) + StringUtils.center("Dest.Disk", 30)
            + StringUtils.center("Size", 10) + StringUtils.center("Type", 10));

    for (NodePlan plan : plans) {
        for (Step step : plan.getVolumeSetPlans()) {
            System.out.println(
                    String.format("%s %s %s %s", StringUtils.center(step.getSourceVolume().getPath(), 30),
                            StringUtils.center(step.getDestinationVolume().getPath(), 30),
                            StringUtils.center(step.getSizeString(step.getBytesToMove()), 10),
                            StringUtils.center(step.getDestinationVolume().getStorageType(), 10)));
        }
    }

    System.out.println(StringUtils.repeat("=", 80));
}
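
This snippet combines repeat with StringUtils.center to lay out a fixed-width table header. A stripped-down sketch of that combination (the column labels and widths below are illustrative):

import org.apache.commons.lang.StringUtils;

public class TableHeaderSketch {
    public static void main(String[] args) {
        // An 80-character rule above and below a row of centered column labels
        String rule = StringUtils.repeat("=", 80);
        System.out.println(rule);
        System.out.println(StringUtils.center("Source Disk", 30)
                + StringUtils.center("Dest. Disk", 30)
                + StringUtils.center("Size", 10)
                + StringUtils.center("Type", 10));
        System.out.println(rule);
    }
}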

From source file:org.apache.hadoop.hdfs.tools.TableListing.java

@Override
public String toString() {
    StringBuilder builder = new StringBuilder();
    // Calculate the widths of each column based on their maxWidths and
    // the wrapWidth for the entire table
    int width = (columns.length - 1) * 2; // inter-column padding
    for (int i = 0; i < columns.length; i++) {
        width += columns[i].maxWidth;
    }
    // Decrease the column size of wrappable columns until the goal width
    // is reached, or we can't decrease anymore
    while (width > wrapWidth) {
        boolean modified = false;
        for (int i = 0; i < columns.length; i++) {
            Column column = columns[i];
            if (column.wrap) {
                int maxWidth = column.getMaxWidth();
                if (maxWidth > 4) {
                    column.setWrapWidth(maxWidth - 1);
                    modified = true;
                    width -= 1;
                    if (width <= wrapWidth) {
                        break;
                    }
                }
            }
        }
        if (!modified) {
            break;
        }
    }

    int startrow = 0;
    if (!showHeader) {
        startrow = 1;
    }
    String[][] columnLines = new String[columns.length][];
    for (int i = startrow; i < numRows + 1; i++) {
        int maxColumnLines = 0;
        for (int j = 0; j < columns.length; j++) {
            columnLines[j] = columns[j].getRow(i);
            if (columnLines[j].length > maxColumnLines) {
                maxColumnLines = columnLines[j].length;
            }
        }

        for (int c = 0; c < maxColumnLines; c++) {
            // First column gets no left-padding
            String prefix = "";
            for (int j = 0; j < columns.length; j++) {
                // Prepend padding
                builder.append(prefix);
                prefix = " ";
                if (columnLines[j].length > c) {
                    builder.append(columnLines[j][c]);
                } else {
                    builder.append(StringUtils.repeat(" ", columns[j].maxWidth));
                }
            }
            builder.append("\n");
        }
    }
    return builder.toString();
}

From source file:org.apache.hadoop.hive.llap.cache.LowLevelLrfuCachePolicy.java

public String debugDumpHeap() {
    StringBuilder result = new StringBuilder("List: ");
    dumpList(result, listHead, listTail);
    result.append("\nHeap:");
    if (heapSize == 0) {
        result.append(" <empty>\n");
        return result.toString();
    }
    result.append("\n");
    int levels = 32 - Integer.numberOfLeadingZeros(heapSize);
    int ix = 0;
    int spacesCount = heap[0].toStringForCache().length() + 3;
    String full = StringUtils.repeat(" ", spacesCount), half = StringUtils.repeat(" ", spacesCount / 2);
    int maxWidth = 1 << (levels - 1);
    for (int i = 0; i < levels; ++i) {
        int width = 1 << i;
        int middleGap = (maxWidth - width) / width;
        for (int j = 0; j < (middleGap >>> 1); ++j) {
            result.append(full);
        }
        if ((middleGap & 1) == 1) {
            result.append(half);
        }
        for (int j = 0; j < width && ix < heapSize; ++j, ++ix) {
            if (j != 0) {
                for (int k = 0; k < middleGap; ++k) {
                    result.append(full);
                }
                if (middleGap == 0) {
                    result.append(" ");
                }
            }
            if ((j & 1) == 0) {
                result.append("(");
            }
            result.append(heap[ix].toStringForCache());
            if ((j & 1) == 1) {
                result.append(")");
            }
        }
        result.append("\n");
    }
    return result.toString();
}

From source file:org.apache.hadoop.hive.ql.metadata.formatting.MetaDataPrettyFormatUtils.java

/**
 * Appends the specified text with alignment to sb.
 * Also appends an appropriately sized delimiter.
 * @return The number of columns consumed by the aligned string and the
 * delimiter.
 */
private static int appendFormattedColumn(StringBuilder sb, String text, int alignment) {
    String paddedText = String.format("%-" + alignment + "s", text);
    int delimCount = 0;
    if (paddedText.length() < alignment + PRETTY_MAX_INTERCOL_SPACING) {
        delimCount = (alignment + PRETTY_MAX_INTERCOL_SPACING) - paddedText.length();
    } else {
        delimCount = PRETTY_MAX_INTERCOL_SPACING;
    }
    String delim = StringUtils.repeat(" ", delimCount);
    sb.append(paddedText);
    sb.append(delim);
    sb.append(MetaDataFormatUtils.FIELD_DELIM);

    return paddedText.length() + delim.length();
}

From source file:org.apache.hadoop.ipc.TestProtoBufRpc.java

@Test(timeout = 6000)
public void testExtraLongRpc() throws Exception {
    TestRpcService2 client = getClient2();
    final String shortString = StringUtils.repeat("X", 4);
    // short message goes through
    EchoResponseProto echoResponse = client.echo2(null, newEchoRequest(shortString));
    Assert.assertEquals(shortString, echoResponse.getMessage());

    final String longString = StringUtils.repeat("X", 4096);
    try {
        client.echo2(null, newEchoRequest(longString));
        Assert.fail("expected extra-long RPC to fail");
    } catch (ServiceException se) {
        // expected
    }
}

From source file:org.apache.hadoop.security.alias.CredentialShell.java

@Override
public String getCommandUsage() {
    StringBuffer sbuf = new StringBuffer(USAGE_PREFIX + COMMANDS);
    String banner = StringUtils.repeat("=", 66);
    sbuf.append(banner + "\n");
    sbuf.append(CreateCommand.USAGE + ":\n\n" + CreateCommand.DESC + "\n");
    sbuf.append(banner + "\n");
    sbuf.append(DeleteCommand.USAGE + ":\n\n" + DeleteCommand.DESC + "\n");
    sbuf.append(banner + "\n");
    sbuf.append(ListCommand.USAGE + ":\n\n" + ListCommand.DESC + "\n");
    return sbuf.toString();
}

From source file:org.apache.hadoop.security.token.DtFileOperations.java

/** Print out a Credentials object.
 *  @param creds the Credentials object to be printed out.
 *  @param alias print only tokens matching alias (null matches all).
 *  @param out print to this stream.
 *  @throws IOException
 */
public static void printCredentials(Credentials creds, Text alias, PrintStream out) throws IOException {
    boolean tokenHeader = true;
    String fmt = "%-24s %-20s %-15s %-12s %s%n";
    for (Token<?> token : creds.getAllTokens()) {
        if (matchAlias(token, alias)) {
            if (tokenHeader) {
                out.printf(fmt, "Token kind", "Service", "Renewer", "Exp date", "URL enc token");
                out.println(StringUtils.repeat("-", 80));
                tokenHeader = false;
            }
            AbstractDelegationTokenIdentifier id = (AbstractDelegationTokenIdentifier) token.decodeIdentifier();
            out.printf(fmt, token.getKind(), token.getService(), (id != null) ? id.getRenewer() : NA_STRING,
                    (id != null) ? formatDate(id.getMaxDate()) : NA_STRING, token.encodeToUrlString());
        }
    }
}

From source file:org.apache.hadoop.yarn.client.cli.LogsCLI.java

private void printContainerLogsFromRunningApplication(Configuration conf, String appId, String containerIdStr,
        String nodeHttpAddress, String nodeId, String[] logFiles, LogCLIHelpers logCliHelper, String appOwner)
        throws IOException {
    String[] requestedLogFiles = logFiles;
    // fetch all the log files for the container
    if (fetchAllLogFiles(logFiles)) {
        requestedLogFiles = getContainerLogFiles(getConf(), containerIdStr, nodeHttpAddress);
    }
    Client webServiceClient = Client.create();
    String containerString = "\n\nContainer: " + containerIdStr;
    System.out.println(containerString);
    System.out.println(StringUtils.repeat("=", containerString.length()));

    for (String logFile : requestedLogFiles) {
        System.out.println("LogType:" + logFile);
        System.out.println("Log Upload Time:" + Times.format(System.currentTimeMillis()));
        System.out.println("Log Contents:");
        try {
            WebResource webResource = webServiceClient
                    .resource(WebAppUtils.getHttpSchemePrefix(conf) + nodeHttpAddress);
            ClientResponse response = webResource.path("ws").path("v1").path("node").path("containerlogs")
                    .path(containerIdStr).path(logFile).accept(MediaType.TEXT_PLAIN).get(ClientResponse.class);
            System.out.println(response.getEntity(String.class));
            System.out.println("End of LogType:" + logFile);
        } catch (ClientHandlerException | UniformInterfaceException ex) {
            System.out.println("Can not find the log file:" + logFile + " for the container:" + containerIdStr
                    + " in NodeManager:" + nodeId);
        }
    }
    // Also dump any partial logs that have already been uploaded to HDFS
    logCliHelper.dumpAContainersLogsForALogType(appId, containerIdStr, nodeId, appOwner,
            Arrays.asList(requestedLogFiles));
}

From source file:org.apache.hadoop.yarn.client.cli.LogsCLI.java

private void printContainerLogsForFinishedApplication(String appId, String containerId, String nodeAddress,
        String[] logFiles, LogCLIHelpers logCliHelper, String appOwner) throws IOException {
    String containerString = "\n\nContainer: " + containerId;
    System.out.println(containerString);
    System.out.println(StringUtils.repeat("=", containerString.length()));
    logCliHelper.dumpAContainersLogsForALogType(appId, containerId, nodeAddress, appOwner,
            logFiles != null ? Arrays.asList(logFiles) : null);
}