Example usage for org.apache.hadoop.io Text toString

Introduction

On this page you can find example usage for org.apache.hadoop.io.Text.toString().

Prototype

@Override
public String toString() 

Document

Convert text back to string
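
A minimal round-trip sketch, not taken from the examples below (the variable names and literal value are illustrative): Text stores its contents as UTF-8 encoded bytes, and toString() decodes those bytes back into a java.lang.String.

Text greeting = new Text("hello world"); // the String is stored as UTF-8 bytes
String decoded = greeting.toString();    // decode the bytes back into a String
System.out.println(decoded);             // prints: hello world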

Usage

From source file: com.cloudera.science.avro.streaming.AvroAsJSONRecordWriter.java

License: Open Source License

@Override
public void write(Text key, Text value) throws IOException {
    writer.append(converter.convert(readKey ? key.toString() : value.toString()));
}

From source file: com.cloudera.science.matching.VertexData.java

License: Open Source License

public VertexData(Text vertexId, VertexState vertexState, Iterable<Edge<Text, IntWritable>> edges) {
    this.vertexId = vertexId.toString();
    this.bidder = vertexState.isBidder();
    this.edges = Maps.newHashMap();
    for (Edge<Text, IntWritable> e : edges) {
        this.edges.put(e.getTargetVertexId().toString(), e.getValue().get());
    }
    this.priceIndex = Maps.newHashMap();
    for (Map.Entry<Text, BigDecimal> e : vertexState.getPriceIndex().entrySet()) {
        priceIndex.put(e.getKey().toString(), e.getValue().toString());
    }
    this.matchId = vertexState.getMatchId().toString();
    this.price = vertexState.getPrice().toString();
}

From source file: com.cloudera.sqoop.lib.RecordParser.java

License: Apache License

/**
 * Return a list of strings representing the fields of the input line.
 * This list is backed by an internal buffer which is cleared by the
 * next call to parseRecord().
 */
public List<String> parseRecord(Text input) throws ParseError {
    if (null == input) {
        throw new ParseError("null input string");
    }

    // TODO(aaron): The parser should be able to handle UTF-8 strings
    // as well, to avoid this transcode operation.
    return parseRecord(input.toString());
}
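
A hedged usage sketch of parseRecord above (the helper name parseAndCopy and the 'parser' argument are illustrative; how a RecordParser is constructed is not shown in this excerpt, and imports such as java.util.ArrayList are omitted as in the other snippets). It illustrates the javadoc caveat: the returned list is backed by an internal buffer, so copy the values if they must survive the next call.

public List<String> parseAndCopy(RecordParser parser, Text line) throws RecordParser.ParseError {
    List<String> fields = parser.parseRecord(line); // valid only until the next parseRecord() call
    return new ArrayList<>(fields);                 // defensive copy of the field values
}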

From source file: com.cloudera.sqoop.mapreduce.MySQLTextExportMapper.java

License: Apache License

/**
 * Export the table to MySQL by using mysqlimport to write the data to the
 * database.
 *
 * Expects one delimited text record as the 'val'; ignores the key.
 */
@Override
public void map(LongWritable key, Text val, Context context) throws IOException, InterruptedException {

    writeRecord(val.toString(), this.recordEndStr);

    // We don't emit anything to the OutputCollector because we wrote
    // straight to mysql. Send a progress indicator to prevent a timeout.
    context.progress();
}

From source file: com.cloudera.sqoop.testutil.ReparseMapper.java

License: Apache License

public void map(LongWritable key, Text val, OutputCollector<Text, NullWritable> out, Reporter r)
        throws IOException {

    LOG.info("Mapper input line: " + val.toString());

    try {
        // Use the user's record class to parse the line back in.
        userRecord.parse(val);
    } catch (RecordParser.ParseError pe) {
        LOG.error("Got parse error: " + pe.toString());
        throw new IOException(pe);
    }

    LOG.info("Mapper output line: " + userRecord.toString());

    out.collect(new Text(userRecord.toString()), NullWritable.get());

    if (!userRecord.toString(false).equals(val.toString())) {
        // Could not format record w/o end-of-record delimiter.
        throw new IOException("Returned string w/o EOR has value [" + userRecord.toString(false) + "] when ["
                + val.toString() + "] was expected.");
    }

    if (!userRecord.toString().equals(val.toString() + "\n")) {
        // misparsed.
        throw new IOException("Returned string has value [" + userRecord.toString() + "] when ["
                + val.toString() + "\n] was expected.");
    }
}

From source file: com.cloudera.traffic.AveragerMapper.java

License: Apache License

@Override
public void map(LongWritable key, Text line, Context context) throws InterruptedException, IOException {
    String[] tokens = line.toString().split(",");
    if (tokens.length < 10) {
        context.getCounter("Averager Counters", "Blank lines").increment(1);
        return;
    }
    String dateTime = tokens[0];
    String stationId = tokens[1];
    String trafficCount = tokens[9];

    if (trafficCount.length() > 0) {
        id.set(stationId + "_" + TimeUtil.toTimeOfWeek(dateTime));
        if (trafficCount.matches("[0-9]+")) {
            outAverage.set(1, Integer.parseInt(trafficCount));
        } else {
            context.getCounter("Averager Counters", "Missing vehicle flows").increment(1);
        }

        context.write(id, outAverage);
    }
}

From source file: com.conversantmedia.mapreduce.example.avro.AvroWordCountReducer.java

License: Apache License

@Override
protected void reduce(Text key, Iterable<LongWritable> values, Context context)
        throws IOException, InterruptedException {
    int sum = 0;
    for (LongWritable value : values) {
        sum += value.get();
    }
    AvroExample.Builder builder = AvroExample.newBuilder();
    builder.setWord(key.toString());
    builder.setFrequency(sum);
    AvroExample datum = builder.build();
    aKey.datum(datum);

    context.write(aKey, NullWritable.get());
    avroMultiOut.write(aKey, NullWritable.get(), "avroMulti");
}

From source file: com.conversantmedia.mapreduce.example.distribute.WordCountWithBlacklistMapper2.java

License: Apache License

@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    String line = value.toString();
    StringTokenizer tokenizer = new StringTokenizer(line);
    while (tokenizer.hasMoreTokens()) {
        String nextWord = tokenizer.nextToken().replaceAll("\\W", "");
        if (!blacklistedWords.contains(nextWord)) {
            word.set(nextWord);
            context.write(word, ONE);
        }
    }
}

From source file: com.conversantmedia.mapreduce.example.NamedOutputWordCountMapper.java

License: Apache License

@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    String line = value.toString();
    StringTokenizer tokenizer = new StringTokenizer(line);
    while (tokenizer.hasMoreTokens()) {
        word.set(tokenizer.nextToken().replaceAll("\\W", ""));
        context.write(word, ONE);
    }

    // Debug output
    if (line.length() > 10) {
        multiOut.write("DEBUG", new Text(line.length() + ":"), value);
    }
}

From source file: com.conversantmedia.mapreduce.example.WordCountMapper.java

License: Apache License

@Override
protected void map(LongWritable key, Text value, Context context) throws IOException, InterruptedException {
    String line = value.toString();
    StringTokenizer tokenizer = new StringTokenizer(line);
    while (tokenizer.hasMoreTokens()) {
        word.set(tokenizer.nextToken().replaceAll("\\W", ""));
        context.write(word, ONE);
    }
}