Example usage for org.apache.commons.lang StringUtils reverse

List of usage examples for org.apache.commons.lang StringUtils reverse

Introduction

In this page you can find the example usage for org.apache.commons.lang StringUtils reverse.

Prototype

public static String reverse(String str) 

Source Link

Document

Reverses a String, as per StrBuilder#reverse().

Usage

From source file:org.apache.accumulo.examples.wikisearch.ingest.WikipediaPartitionedMapper.java

/**
 * Writes one parsed Wikipedia {@code Article} into the partitioned table layout:
 * event fields, an in-partition ("fi") index, a global index, a global
 * <em>reverse</em> index (field values reversed to support leading-wildcard
 * lookups), per-field metadata, and the Base64-encoded document body.
 *
 * <p>A {@code null} article is counted under the "invalid articles" counter and
 * skipped. Fields such as {@code cvPrefix}, {@code numPartitions},
 * {@code NULL_VALUE}, {@code tablename} and the {@code wiki*Output} buffers are
 * declared on the enclosing mapper — presumably configured in setup();
 * TODO confirm against the class definition.
 *
 * @param language the article's language code; used as column-family prefix and
 *                 in the column visibility expression
 * @param article  the parsed article, or {@code null} when parsing failed upstream
 * @param context  MapReduce context the document mutation is written to
 */
@Override
protected void map(Text language, Article article, Context context) throws IOException, InterruptedException {
    // \0 separates the components packed into column family/qualifier strings.
    String NULL_BYTE = "\u0000";
    String colfPrefix = language.toString() + NULL_BYTE;
    String indexPrefix = "fi" + NULL_BYTE;
    ColumnVisibility cv = new ColumnVisibility(cvPrefix + language);

    if (article != null) {
        Text partitionId = new Text(Integer.toString(WikipediaMapper.getPartitionId(article, numPartitions)));

        // Create the mutations for the document.
        // Row is partition id, colf is language\0articleid, colq is fieldName\0fieldValue
        Mutation m = new Mutation(partitionId);
        for (Entry<String, Object> entry : article.getFieldValues().entrySet()) {
            m.put(colfPrefix + article.getId(), entry.getKey() + NULL_BYTE + entry.getValue().toString(), cv,
                    article.getTimestamp(), NULL_VALUE);
            // Create mutations for the metadata table: one event-column entry per field name.
            MutationInfo mm = new MutationInfo(entry.getKey(), METADATA_EVENT_COLUMN_FAMILY,
                    language.toString(), cv, article.getTimestamp());
            wikiMetadataOutput.put(mm, NULL_VALUE);
        }

        // Tokenize the content
        Set<String> tokens = WikipediaMapper.getTokens(article);

        // We are going to put the fields to be indexed into a multimap. This allows us to iterate
        // over the entire set once.
        Multimap<String, String> indexFields = HashMultimap.create();
        // Add the normalized field values
        LcNoDiacriticsNormalizer normalizer = new LcNoDiacriticsNormalizer();
        for (Entry<String, String> index : article.getNormalizedFieldValues().entrySet())
            indexFields.put(index.getKey(), index.getValue());
        // Add the tokens (normalized the same way so index lookups are consistent)
        for (String token : tokens)
            indexFields.put(TOKENS_FIELD_NAME, normalizer.normalizeFieldValue("", token));

        for (Entry<String, String> index : indexFields.entries()) {
            // Create mutations for the in partition index
            // Row is partition id, colf is 'fi'\0fieldName, colq is fieldValue\0language\0article id
            m.put(indexPrefix + index.getKey(), index.getValue() + NULL_BYTE + colfPrefix + article.getId(), cv,
                    article.getTimestamp(), NULL_VALUE);

            // Create mutations for the global index
            // Row is field value, colf is field name, colq is partitionid\0language, value is Uid.List object
            MutationInfo gm = new MutationInfo(index.getValue(), index.getKey(),
                    partitionId + NULL_BYTE + language, cv, article.getTimestamp());
            wikiIndexOutput.put(gm, new CountAndSet(Integer.toString(article.getId())));

            // Create mutations for the global reverse index: same layout, but the
            // field value is stored reversed so "*suffix" queries become prefix scans.
            MutationInfo grm = new MutationInfo(StringUtils.reverse(index.getValue()), index.getKey(),
                    partitionId + NULL_BYTE + language, cv, article.getTimestamp());
            wikiReverseIndexOutput.put(grm, new CountAndSet(Integer.toString(article.getId())));

            // Create mutations for the metadata table: records which normalizer
            // produced the indexed form of this field.
            MutationInfo mm = new MutationInfo(index.getKey(), METADATA_INDEX_COLUMN_FAMILY,
                    language + NULL_BYTE + LcNoDiacriticsNormalizer.class.getName(), cv,
                    article.getTimestamp());
            wikiMetadataOutput.put(mm, NULL_VALUE);
        }
        // Add the entire text to the document section of the table.
        // row is the partition, colf is 'd', colq is language\0articleid, value is Base64 encoded GZIP'd document
        m.put(DOCUMENT_COLUMN_FAMILY, colfPrefix + article.getId(), cv, article.getTimestamp(),
                new Value(Base64.encodeBase64(article.getText().getBytes())));
        context.write(tablename, m);

    } else {
        context.getCounter("wikipedia", "invalid articles").increment(1);
    }
    // Signal liveness to the framework regardless of outcome.
    context.progress();
}

From source file:org.apache.hadoop.hive.ql.exec.Utilities.java

/**
 * Shortens {@code str} to roughly {@code max} characters while keeping both
 * ends readable — e.g. "From src insert blah blah" becomes
 * "From src insert ... blah".
 *
 * @param str the string to shorten; surrounding whitespace is dropped first
 * @param max target maximum length; trimmed strings at or under this length
 *            are returned unchanged
 * @return the trimmed string, or an elided "prefix ... suffix" form
 */
public static String abbreviate(String str, int max) {
    String trimmed = str.trim();
    if (trimmed.length() <= max) {
        return trimmed;
    }

    // Keep at most 20 trailing characters, but never more than half of what
    // remains once the "..." ellipsis is accounted for.
    int suffixLength = Math.min(20, (max - 3) / 2);

    // Abbreviating the reversed string cuts near a word boundary at the END of
    // the original; flipping the result back yields the last few words.
    String reversed = StringUtils.reverse(trimmed);
    String tail = StringUtils.reverse(WordUtils.abbreviate(reversed, 0, suffixLength, ""));

    // Elide the middle: shorten the front so prefix + tail fits within max.
    String head = StringUtils.abbreviate(trimmed, max - tail.length());

    return head + tail;
}

From source file:org.apache.kylin.cube.cuboid.Cuboid.java

/**
 * Renders a cuboid id as a fixed-width bit string with the highest dimension
 * first: bit {@code i} of {@code cuboidID} corresponds to dimension {@code i},
 * and the output lists dimension {@code dimensionCount - 1} leftmost, so e.g.
 * {@code getDisplayName(5, 4)} yields {@code "0101"}.
 *
 * @param cuboidID       bitmask of the dimensions present in the cuboid
 * @param dimensionCount total number of dimensions; also the output width
 * @return a string of '0'/'1' characters of length {@code dimensionCount}
 */
public static String getDisplayName(long cuboidID, int dimensionCount) {
    // Build least-significant bit first, then reverse once at the end.
    // StringBuilder.reverse() replaces the commons-lang
    // StringUtils.reverse(sb.toString()) round-trip: same result, one fewer
    // String allocation, and no third-party call in this method.
    StringBuilder sb = new StringBuilder(dimensionCount);
    for (int i = 0; i < dimensionCount; ++i) {
        sb.append((cuboidID & (1L << i)) == 0 ? '0' : '1');
    }
    return sb.reverse().toString();
}

From source file:org.apache.rya.indexing.accumulo.freetext.AccumuloFreeTextIndexer.java

/**
 * Expands a single-wildcard free-text term against the term table.
 *
 * <p>For a leading wildcard ("*foo") the reversed-term column family is
 * scanned and each hit is reversed back before being returned; for a trailing
 * wildcard ("foo*") the forward term list is scanned directly.
 *
 * @param string  the query term containing a leading or trailing '*'
 * @param reverse {@code true} when the wildcard is at the front of the term
 * @return the concrete terms matching the wildcard; never empty — when
 *         nothing matches, a placeholder ("\1\1\1") that cannot occur in the
 *         index is substituted so downstream query construction stays valid
 * @throws IOException if the term-table scanner cannot be created
 */
private Set<String> unrollWildcard(final String string, final boolean reverse) throws IOException {
    final Scanner termTableScan = getScanner(getFreeTextTermTablename(conf));

    final Set<String> unrolledTerms = new HashSet<String>();

    Text queryTerm;
    if (reverse) {
        // Leading wildcard: strip it and look the remainder up in the
        // reversed-term list. NOTE(review): toLowerCase() uses the default
        // locale — confirm index terms are normalized the same way
        // (Locale.ROOT would be safer if so).
        final String t = StringUtils.removeStart(string, "*").toLowerCase();
        queryTerm = ColumnPrefixes.getRevTermListColFam(t);
    } else {
        // Trailing wildcard: strip it and use the forward term list.
        final String t = StringUtils.removeEnd(string, "*").toLowerCase();
        queryTerm = ColumnPrefixes.getTermListColFam(t);
    }

    // perform query and read results
    termTableScan.setRange(Range.prefix(queryTerm));

    for (final Entry<Key, Value> e : termTableScan) {
        final String term = ColumnPrefixes.removePrefix(e.getKey().getRow()).toString();
        if (reverse) {
            // Terms are stored reversed; undo that to recover the real term.
            unrolledTerms.add(StringUtils.reverse(term));
        } else {
            unrolledTerms.add(term);
        }
    }

    if (unrolledTerms.isEmpty()) {
        // put in a placeholder term that will never be in the index.
        unrolledTerms.add("\1\1\1");
    }

    return unrolledTerms;
}

From source file:org.apache.solr.analytics.util.valuesource.ReverseStringFunction.java

/**
 * Returns the document's string value with its characters reversed, or
 * {@code null} when the document has no value for this field.
 */
protected CharSequence func(int doc, FunctionValues vals) throws IOException {
    String raw = vals.strVal(doc);
    if (raw == null) {
        return null;
    }
    return StringUtils.reverse(raw);
}

From source file:org.apache.tapestry5.func.ZippedFlowTests.java

/**
 * Verifies that mapTuples() transforms each tuple element-wise: the second
 * element is reversed and the pair is summarized as "first-length".
 * Assumes the {@code zipped} fixture's first tuple is (1, "fred") — hence
 * the expected ("derf", "1-4"); confirm against the test setup.
 */
@Test
public void mapTuples() {
    Tuple<String, String> firstTuple = zipped
            .mapTuples(new Mapper<Tuple<Integer, String>, Tuple<String, String>>() {
                public Tuple<String, String> map(Tuple<Integer, String> value) {
                    // New tuple: reversed string, plus "index-length" summary.
                    return Tuple.create(StringUtils.reverse(value.second),
                            String.format("%d-%d", value.first, value.second.length()));
                }

            }).first();

    assertEquals(firstTuple.first, "derf");
    assertEquals(firstTuple.second, "1-4");
}

From source file:org.castafiore.utils.StringUtil.java

/**
 * Groups the characters of {@code s} into blocks of three, counted from the
 * right, separated by single spaces — e.g. {@code "1234567"} becomes
 * {@code "1 234 567"}. Strings of three characters or fewer are returned
 * unchanged. Typically used to format digit strings.
 *
 * @param s the string to group; must not be {@code null}
 * @return {@code s} with a space inserted after every third character,
 *         counting from the right
 */
private static String sep(String s) {
    if (s.length() <= 3) {
        return s;
    }
    // Walk the string right-to-left, emitting a separator after every third
    // character. Building directly in reverse and flipping once with
    // StringBuilder.reverse() replaces the two commons-lang
    // StringUtils.reverse() calls and the intermediate trim().
    StringBuilder grouped = new StringBuilder(s.length() + s.length() / 3);
    int counter = 0;
    for (int i = s.length() - 1; i >= 0; i--) {
        grouped.append(s.charAt(i));
        counter++;
        if (counter == 3) {
            grouped.append(' ');
            counter = 0;
        }
    }
    // A length divisible by three leaves a dangling separator; drop it
    // before reversing back (this is what the original's trim() did).
    if (grouped.charAt(grouped.length() - 1) == ' ') {
        grouped.deleteCharAt(grouped.length() - 1);
    }
    return grouped.reverse().toString();
}

From source file:org.displaytag.decorator.AutolinkColumnDecorator.java

/**
 * Wraps email addresses in mailto: links and recognized URLs in anchor tags
 * within the column's string value, leaving all other text untouched.
 *
 * @param columnValue the raw column value; may be null
 * @return the HTML-decorated string, or null when the input is null
 * @see org.displaytag.decorator.ColumnDecorator#decorate(java.lang.Object)
 */
public String decorate(Object columnValue) {
    if (columnValue == null) {
        return null;
    }
    String work = columnValue.toString();

    int urlBegin;
    StringBuffer buffer = new StringBuffer();

    // First check for email addresses.
    while ((urlBegin = work.indexOf('@')) != -1) {
        int start = 0;
        int end = work.length() - 1;

        // scan backwards from the '@' to the preceding whitespace (or start)
        for (int j = urlBegin; j >= 0; j--) {
            if (Character.isWhitespace(work.charAt(j))) {
                start = j + 1;
                break;
            }
        }

        // scan forwards from the '@' to the next whitespace (or end)
        for (int j = urlBegin; j <= end; j++) {
            if (Character.isWhitespace(work.charAt(j))) {
                end = j - 1;
                break;
            }
        }

        String email = work.substring(start, end + 1);

        buffer.append(work.substring(0, start)).append("<a href=\"mailto:") //$NON-NLS-1$
                .append(email + "\">") //$NON-NLS-1$
                .append(email).append("</a>"); //$NON-NLS-1$

        // NOTE(review): 'end' is an index (max length-1), so this comparison
        // looks unreachable; the else branch already yields "" when the email
        // ends the string. Confirm before simplifying.
        if (end == work.length()) {
            work = TagConstants.EMPTY_STRING;
        } else {
            work = work.substring(end + 1);
        }
    }

    // Re-join the processed prefix with the unprocessed remainder, then
    // reuse a fresh buffer for the URL pass.
    work = buffer.toString() + work;
    buffer = new StringBuffer();

    // Now check for urls...
    while ((urlBegin = work.indexOf(URL_DELIM)) != -1) {

        // scan backwards collecting the scheme characters before "://"
        int fullUrlBegin = urlBegin;
        StringBuffer prefixBuffer = new StringBuffer(10);
        for (int j = fullUrlBegin - 1; j >= 0; j--) {
            if (Character.isWhitespace(work.charAt(j))) {
                fullUrlBegin = j + 1;
                break;
            }
            fullUrlBegin = j;
            prefixBuffer.append(work.charAt(j));
        }

        // prefixBuffer was filled right-to-left, so reverse it before
        // comparing against the known scheme prefixes ("http", "ftp", ...).
        if (!ArrayUtils.contains(URLS_PREFIXES, StringUtils.reverse(prefixBuffer.toString()))) {

            // Unrecognized scheme: skip past this "://" and keep scanning.
            buffer.append(work.substring(0, urlBegin + 3));
            work = work.substring(urlBegin + 3);
            continue;
        }

        int urlEnd = work.length();

        // scan forwards to the whitespace that terminates the URL (or end)
        for (int j = urlBegin; j < urlEnd; j++) {
            if (Character.isWhitespace(work.charAt(j))) {
                urlEnd = j;
                break;
            }
        }

        String url = work.substring(fullUrlBegin, urlEnd);

        buffer.append(work.substring(0, fullUrlBegin)).append("<a href=\"")//$NON-NLS-1$
                .append(url).append("\">")//$NON-NLS-1$
                .append(url).append("</a>"); //$NON-NLS-1$

        if (urlEnd >= work.length()) {
            work = TagConstants.EMPTY_STRING;
        } else {
            work = work.substring(urlEnd);
        }
    }

    buffer.append(work);
    return buffer.toString();
}

From source file:org.failearly.dataz.template.encoder.support.EncodersTest.java

/**
 * Chaining string-to-bytes, byte-array inversion, and bytes-to-string should
 * apply the encoders left to right — i.e. behave like reversing the input
 * string. NOTE(review): equality with StringUtils.reverse only holds when
 * ANY_STRING encodes one byte per character; confirm the fixture is ASCII.
 */
@Test
public void chain__should_apply_encoders_from_left_to_right() throws Exception {
    // arrange / given
    final Encoder<String, String> encoder = Encoders.chain(Encoders.stringToByteArray(), invertByteArray(),
            Encoders.byteArrayToString());

    // assert / then
    assertThat(encoder.encode(ANY_STRING), is(StringUtils.reverse(ANY_STRING)));
}

From source file:org.failearly.dataz.template.encoder.support.EncodersTest.java

/**
 * Test helper: an encoder that reverses a byte array by round-tripping it
 * through a String.
 *
 * <p>NOTE(review): {@code new String(byte[])} and {@code getBytes()} use the
 * platform default charset, so this only reverses bytes faithfully for
 * single-byte encodings; it must stay consistent with
 * {@code Encoders.stringToByteArray()} — verify the fixtures are ASCII.
 */
private static Encoder invertByteArray() {
    return new EncoderBase<byte[], byte[]>() {
        @Override
        public byte[] encode(byte[] value) throws Exception {
            return StringUtils.reverse(new String(value)).getBytes();
        }
    };
}