Example usage for org.apache.hadoop.io NullWritable get

Introduction

On this page you can find example usages of org.apache.hadoop.io.NullWritable.get().

Prototype

public static NullWritable get() 

Document

Returns the single instance of this class.
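
Since get() always returns the same immutable singleton (NullWritable has no public constructor), the instance can be shared freely across records wherever the Writable API demands a key or value. A minimal standalone sketch, not drawn from any of the sources below (the class name is chosen here for illustration):

import org.apache.hadoop.io.NullWritable;

public class NullWritableDemo {
    public static void main(String[] args) {
        // Both calls return the same shared instance.
        NullWritable a = NullWritable.get();
        NullWritable b = NullWritable.get();
        System.out.println(a == b);       // true
        // NullWritable serializes to zero bytes; its string form is "(null)".
        System.out.println(a);            // (null)
    }
}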

Usage

From source file: org.apache.avro.mapred.tether.TetherMapRunner.java

License: Apache License

@SuppressWarnings("unchecked")
public void run(RecordReader<TetherData, NullWritable> recordReader,
        OutputCollector<TetherData, NullWritable> collector, Reporter reporter) throws IOException {
    try {
        // start tethered process
        process = new TetheredProcess(job, collector, reporter);

        // configure it
        process.inputClient.configure(TaskType.MAP, job.get(AvroJob.INPUT_SCHEMA),
                AvroJob.getMapOutputSchema(job).toString());

        process.inputClient.partitions(job.getNumReduceTasks());

        // run map
        Counter inputRecordCounter = reporter.getCounter("org.apache.hadoop.mapred.Task$Counter",
                "MAP_INPUT_RECORDS");
        TetherData data = new TetherData();
        while (recordReader.next(data, NullWritable.get())) {
            process.inputClient.input(data.buffer(), data.count());
            inputRecordCounter.increment(data.count() - 1);
            if (process.outputService.isFinished())
                break;
        }
        process.inputClient.complete();

        // wait for completion
        if (process.outputService.waitForFinish())
            throw new IOException("Task failed: " + process.outputService.error());

    } catch (Throwable t) { // send abort
        LOG.warn("Task failed", t);
        process.inputClient.abort();
        throw new IOException("Task failed: " + t, t);

    } finally { // clean up
        if (process != null)
            process.close();
    }
}

From source file: org.apache.avro.mapred.tether.TetherOutputService.java

License: Apache License

public void output(ByteBuffer datum) {
    try {
        collector.collect(new TetherData(datum), NullWritable.get());
    } catch (Throwable e) {
        TetherMapRunner.LOG.warn("Error: " + e, e);
        synchronized (this) {
            error = e.toString();
        }
    }
}

From source file: org.apache.avro.mapreduce.AvroKeyRecordReader.java

License: Apache License

/** {@inheritDoc} */
@Override
public NullWritable getCurrentValue() throws IOException, InterruptedException {
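    // Avro input carries the datum in the key; the value is always the shared NullWritable instance.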
    return NullWritable.get();
}

From source file: org.apache.avro.mapreduce.TestAvroKeyRecordWriter.java

License: Apache License

@Test
public void testWrite() throws IOException {
    Schema writerSchema = Schema.create(Schema.Type.INT);
    GenericData dataModel = new ReflectData();
    CodecFactory compressionCodec = CodecFactory.nullCodec();
    ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
    TaskAttemptContext context = createMock(TaskAttemptContext.class);

    replay(context);

    // Write an avro container file with two records: 1 and 2.
    AvroKeyRecordWriter<Integer> recordWriter = new AvroKeyRecordWriter<Integer>(writerSchema, dataModel,
            compressionCodec, outputStream);
    recordWriter.write(new AvroKey<Integer>(1), NullWritable.get());
    recordWriter.write(new AvroKey<Integer>(2), NullWritable.get());
    recordWriter.close(context);

    verify(context);

    // Verify that the file was written as expected.
    InputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray());
    Schema readerSchema = Schema.create(Schema.Type.INT);
    DatumReader<Integer> datumReader = new SpecificDatumReader<Integer>(readerSchema);
    DataFileStream<Integer> dataFileReader = new DataFileStream<Integer>(inputStream, datumReader);

    assertTrue(dataFileReader.hasNext()); // Record 1.
    assertEquals(1, dataFileReader.next().intValue());
    assertTrue(dataFileReader.hasNext()); // Record 2.
    assertEquals(2, dataFileReader.next().intValue());
    assertFalse(dataFileReader.hasNext()); // No more records.

    dataFileReader.close();
}

From source file: org.apache.beam.sdk.io.hadoop.WritableCoder.java

License: Apache License

@SuppressWarnings("unchecked")
@Override
public T decode(InputStream inStream) throws IOException {
    try {
        if (type == NullWritable.class) {
            // NullWritable has no default constructor
            return (T) NullWritable.get();
        }
        T t = type.getDeclaredConstructor().newInstance();
        t.readFields(new DataInputStream(inStream));
        return t;
    } catch (InstantiationException | IllegalAccessException | NoSuchMethodException
            | InvocationTargetException e) {
        throw new CoderException("unable to deserialize record", e);
    }
}

From source file: org.apache.beam.sdk.io.hdfs.HDFSFileSink.java

License: Apache License

public static <T> HDFSFileSink<T, NullWritable, Text> toText(String path) {
    SerializableFunction<T, KV<NullWritable, Text>> outputConverter = new SerializableFunction<T, KV<NullWritable, Text>>() {
        @Override
        public KV<NullWritable, Text> apply(T input) {
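            // TextOutputFormat skips NullWritable keys, so only the text value appears in the output.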
            return KV.of(NullWritable.get(), new Text(input.toString()));
        }
    };
    return to(path, TextOutputFormat.class, NullWritable.class, Text.class, outputConverter);
}

From source file: org.apache.beam.sdk.io.hdfs.HDFSFileSink.java

License: Apache License

/**
 * Helper to create an Avro sink for a given {@link AvroCoder}. Keep in mind that the
 * configuration object is altered to enable Avro output.
 */
public static <T> HDFSFileSink<T, AvroKey<T>, NullWritable> toAvro(String path, final AvroCoder<T> coder,
        Configuration conf) {
    SerializableFunction<T, KV<AvroKey<T>, NullWritable>> outputConverter = new SerializableFunction<T, KV<AvroKey<T>, NullWritable>>() {
        @Override
        public KV<AvroKey<T>, NullWritable> apply(T input) {
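            // AvroKeyOutputFormat serializes only the key; NullWritable stands in as the required value.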
            return KV.of(new AvroKey<>(input), NullWritable.get());
        }
    };
    conf.set("avro.schema.output.key", coder.getSchema().toString());
    return to(path, AvroKeyOutputFormat.class, (Class<AvroKey<T>>) (Class<?>) AvroKey.class, NullWritable.class,
            outputConverter).withConfiguration(conf);
}

From source file: org.apache.beam.sdk.io.hdfs.HDFSFileSinkTest.java

License: Apache License

@Test
public void testWriteSingleRecord() throws Exception {
    PipelineOptions options = PipelineOptionsFactory.create();
    File file = tmpFolder.newFolder();

    HDFSFileSink<String, NullWritable, Text> sink = HDFSFileSink.to(file.toString(),
            SequenceFileOutputFormat.class, NullWritable.class, Text.class,
            new SerializableFunction<String, KV<NullWritable, Text>>() {
                @Override
                public KV<NullWritable, Text> apply(String input) {
                    return KV.of(NullWritable.get(), new Text(input));
                }
            });

    doWrite(sink, options, Collections.singletonList(foobar));

    SequenceFile.Reader.Option opts = SequenceFile.Reader.file(new Path(file.toString(), part0));
    SequenceFile.Reader reader = new SequenceFile.Reader(new Configuration(), opts);
    assertEquals(NullWritable.class.getName(), reader.getKeyClassName());
    assertEquals(Text.class.getName(), reader.getValueClassName());
    NullWritable k = NullWritable.get();
    Text v = new Text();
    assertEquals(true, reader.next(k, v));
    assertEquals(NullWritable.get(), k);
    assertEquals(new Text(foobar), v);
}

From source file: org.apache.camel.component.hdfs.HdfsConsumerTest.java

License: Apache License

@Test
public void testReadBoolean() throws Exception {
    if (!canTest()) {
        return;
    }

    final Path file = new Path(new File("target/test/test-camel-boolean").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs1 = FileSystem.get(file.toUri(), conf);
    SequenceFile.Writer writer = createWriter(fs1, conf, file, NullWritable.class, BooleanWritable.class);
    NullWritable keyWritable = NullWritable.get();
    BooleanWritable valueWritable = new BooleanWritable();
    valueWritable.set(true);
    writer.append(keyWritable, valueWritable);
    writer.sync();
    writer.close();

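    // Consume the sequence file through the Camel HDFS component and expect one message.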
    context.addRoutes(new RouteBuilder() {
        public void configure() {
            from("hdfs:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0")
                    .to("mock:result");
        }
    });
    context.start();

    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(1);
    resultEndpoint.assertIsSatisfied();
}

From source file: org.apache.camel.component.hdfs.HdfsConsumerTest.java

License: Apache License

@Test
public void testReadByte() throws Exception {
    if (!canTest()) {
        return;
    }

    final Path file = new Path(new File("target/test/test-camel-byte").getAbsolutePath());
    Configuration conf = new Configuration();
    FileSystem fs1 = FileSystem.get(file.toUri(), conf);
    SequenceFile.Writer writer = createWriter(fs1, conf, file, NullWritable.class, ByteWritable.class);
    NullWritable keyWritable = NullWritable.get();
    ByteWritable valueWritable = new ByteWritable();
    byte value = 3;
    valueWritable.set(value);
    writer.append(keyWritable, valueWritable);
    writer.sync();
    writer.close();

    MockEndpoint resultEndpoint = context.getEndpoint("mock:result", MockEndpoint.class);
    resultEndpoint.expectedMessageCount(1);
    resultEndpoint.message(0).body(byte.class).isEqualTo(3);

    context.addRoutes(new RouteBuilder() {
        public void configure() {
            from("hdfs:///" + file.toUri() + "?fileSystemType=LOCAL&fileType=SEQUENCE_FILE&initialDelay=0")
                    .to("mock:result");
        }
    });
    context.start();

    resultEndpoint.assertIsSatisfied();
}