Example usage for org.apache.hadoop.io IntWritable get

Introduction

This page lists example usages of org.apache.hadoop.io.IntWritable.get().

Prototype

public int get() 

Document

Return the value of this IntWritable.
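
For orientation before the real-world usages below, here is a minimal, self-contained sketch pairing get() with its counterpart set(int). The class name IntWritableGetDemo is invented for this illustration and is not part of Hadoop.

import org.apache.hadoop.io.IntWritable;

public class IntWritableGetDemo {
    public static void main(String[] args) {
        // IntWritable is a mutable box around a primitive int, designed to be
        // reused across records rather than reallocated for each one.
        IntWritable count = new IntWritable(41);
        System.out.println(count.get()); // 41

        // set(int) overwrites the boxed value in place; get() reads it back.
        count.set(count.get() + 1);
        System.out.println(count.get()); // 42
    }
}

The usages below rely on the same mutability: a single IntWritable instance is reused across reader.next(key, value) calls, and key.get() returns whatever value the reader last deserialized into it.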

Usage

From source file: org.apache.accumulo.server.tabletserver.log.MultiReaderTest.java

License: Apache License

private void scanOdd(MultiReader reader, int start) throws IOException {
    IntWritable key = new IntWritable();
    BytesWritable value = new BytesWritable();

    for (int i = start + 2; i < 1000; i += 2) {
        assertTrue(reader.next(key, value));
        assertEquals(i, key.get());
    }
}

From source file: org.apache.accumulo.server.tabletserver.log.MultiReaderTest.java

License: Apache License

@Test
public void testMultiReader() throws IOException {
    Path manyMaps = new Path("file://" + root.getRoot().getAbsolutePath() + "/manyMaps");
    MultiReader reader = new MultiReader(fs, manyMaps);
    IntWritable key = new IntWritable();
    BytesWritable value = new BytesWritable();

    for (int i = 0; i < 1000; i++) {
        if (i == 10)
            continue;
        assertTrue(reader.next(key, value));
        assertEquals(i, key.get());
    }
    assertEquals(0, value.compareTo(new BytesWritable("someValue".getBytes())));
    assertFalse(reader.next(key, value));

    key.set(500);
    assertTrue(reader.seek(key));
    scan(reader, 500);
    key.set(10);
    assertFalse(reader.seek(key));
    scan(reader, 10);
    key.set(1000);
    assertFalse(reader.seek(key));
    assertFalse(reader.next(key, value));
    key.set(-1);
    assertFalse(reader.seek(key));
    key.set(0);
    assertTrue(reader.next(key, value));
    assertEquals(0, key.get());
    reader.close();

    fs.deleteRecursively(new Path(manyMaps, "even"));
    reader = new MultiReader(fs, manyMaps);
    key.set(501);
    assertTrue(reader.seek(key));
    scanOdd(reader, 501);
    key.set(1000);
    assertFalse(reader.seek(key));
    assertFalse(reader.next(key, value));
    key.set(-1);
    assertFalse(reader.seek(key));
    key.set(1);
    assertTrue(reader.next(key, value));
    assertEquals(1, key.get());
    reader.close();

}

From source file: org.apache.accumulo.tserver.log.RecoveryLogsReaderTest.java

License: Apache License

private void scan(RecoveryLogReader reader, int start) throws IOException {
    IntWritable key = new IntWritable();
    BytesWritable value = new BytesWritable();

    for (int i = start + 1; i < 1000; i++) {
        if (i == 10)
            continue;
        assertTrue(reader.next(key, value));
        assertEquals(i, key.get());
    }
}

From source file: org.apache.accumulo.tserver.log.RecoveryLogsReaderTest.java

License: Apache License

private void scanOdd(RecoveryLogReader reader, int start) throws IOException {
    IntWritable key = new IntWritable();
    BytesWritable value = new BytesWritable();

    for (int i = start + 2; i < 1000; i += 2) {
        assertTrue(reader.next(key, value));
        assertEquals(i, key.get());
    }
}

From source file: org.apache.accumulo.tserver.log.RecoveryLogsReaderTest.java

License: Apache License

@Test
public void testMultiReader() throws IOException {
    Path manyMaps = new Path("file://" + root.getRoot().getAbsolutePath() + "/manyMaps");
    RecoveryLogReader reader = new RecoveryLogReader(fs, manyMaps);
    IntWritable key = new IntWritable();
    BytesWritable value = new BytesWritable();

    for (int i = 0; i < 1000; i++) {
        if (i == 10)
            continue;
        assertTrue(reader.next(key, value));
        assertEquals(i, key.get());
    }
    assertEquals(0, value.compareTo(new BytesWritable("someValue".getBytes())));
    assertFalse(reader.next(key, value));

    key.set(500);
    assertTrue(reader.seek(key));
    scan(reader, 500);
    key.set(10);
    assertFalse(reader.seek(key));
    scan(reader, 10);
    key.set(1000);
    assertFalse(reader.seek(key));
    assertFalse(reader.next(key, value));
    key.set(-1);
    assertFalse(reader.seek(key));
    key.set(0);
    assertTrue(reader.next(key, value));
    assertEquals(0, key.get());
    reader.close();

    fs.deleteRecursively(new Path(manyMaps, "even"));
    reader = new RecoveryLogReader(fs, manyMaps);
    key.set(501);
    assertTrue(reader.seek(key));
    scanOdd(reader, 501);
    key.set(1000);
    assertFalse(reader.seek(key));
    assertFalse(reader.next(key, value));
    key.set(-1);
    assertFalse(reader.seek(key));
    key.set(1);
    assertTrue(reader.next(key, value));
    assertEquals(1, key.get());
    reader.close();

}

From source file: org.apache.ambari.servicemonitor.jobs.FileUsingReducer.java

License: Apache License

/**
 * Reduce: sum the values for a key and emit the total.
 *
 * @param key the key.
 * @param values the list of values to reduce.
 * @param output to collect keys and combined values.
 * @param reporter facility to report progress.
 * @throws IOException on a file IO problem
 */
@Override
public void reduce(IntWritable key, Iterator<IntWritable> values,
        OutputCollector<IntWritable, IntWritable> output, Reporter reporter) throws IOException {
    operation.execute(reporter);
    int sum = 0;
    while (values.hasNext()) {
        IntWritable next = values.next();
        sum += next.get();
    }
    // iw is a reusable IntWritable field of the reducer; the iterator is
    // exhausted at this point, so emit the accumulated sum unconditionally.
    iw.set(sum);
    output.collect(key, iw);
}

From source file: org.apache.avro.hadoop.io.TestAvroSequenceFile.java

License: Apache License

/** Tests that reading and writing ordinary Writables still works. */
@Test
public void testReadWritables() throws IOException {
    Path sequenceFilePath = new Path(new File(mTempDir.getRoot(), "output.seq").getPath());

    writeSequenceFile(sequenceFilePath, Text.class, IntWritable.class, null, null, new Text("one"),
            new IntWritable(1), new Text("two"), new IntWritable(2));

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    AvroSequenceFile.Reader.Options options = new AvroSequenceFile.Reader.Options().withFileSystem(fs)
            .withInputPath(sequenceFilePath).withConfiguration(conf);
    SequenceFile.Reader reader = new AvroSequenceFile.Reader(options);

    Text key = new Text();
    IntWritable value = new IntWritable();

    // Read the first record.
    assertTrue(reader.next(key));
    assertEquals("one", key.toString());
    reader.getCurrentValue(value);
    assertNotNull(value);
    assertEquals(1, value.get());

    // Read the second record.
    assertTrue(reader.next(key));
    assertEquals("two", key.toString());
    reader.getCurrentValue(value);
    assertNotNull(value);
    assertEquals(2, value.get());

    assertFalse("Should be no more records.", reader.next(key));
}

From source file: org.apache.beam.runners.spark.io.hadoop.HadoopFileFormatPipelineTest.java

License: Apache License

@Test
public void testSequenceFile() throws Exception {
    populateFile();

    PipelineOptions options = PipelineOptionsFactory.create();
    options.setRunner(SparkRunner.class);
    Pipeline p = Pipeline.create(options);
    @SuppressWarnings("unchecked")
    Class<? extends FileInputFormat<IntWritable, Text>> inputFormatClass =
            (Class<? extends FileInputFormat<IntWritable, Text>>) (Class<?>) SequenceFileInputFormat.class;
    HadoopIO.Read.Bound<IntWritable, Text> read = HadoopIO.Read.from(inputFile.getAbsolutePath(),
            inputFormatClass, IntWritable.class, Text.class);
    PCollection<KV<IntWritable, Text>> input = p.apply(read)
            .setCoder(KvCoder.of(WritableCoder.of(IntWritable.class), WritableCoder.of(Text.class)));
    @SuppressWarnings("unchecked")
    Class<? extends FileOutputFormat<IntWritable, Text>> outputFormatClass =
            (Class<? extends FileOutputFormat<IntWritable, Text>>) (Class<?>) TemplatedSequenceFileOutputFormat.class;
    @SuppressWarnings("unchecked")
    HadoopIO.Write.Bound<IntWritable, Text> write = HadoopIO.Write.to(outputFile.getAbsolutePath(),
            outputFormatClass, IntWritable.class, Text.class);
    input.apply(write.withoutSharding());
    p.run();

    IntWritable key = new IntWritable();
    Text value = new Text();
    try (Reader reader = new Reader(new Configuration(), Reader.file(new Path(outputFile.toURI())))) {
        int i = 0;
        while (reader.next(key, value)) {
            assertEquals(i, key.get());
            assertEquals("value-" + i, value.toString());
            i++;
        }
    }
}

From source file: org.apache.cassandra.hadoop.SampleColumnMapper.java

License: Apache License

protected void map(IntWritable key, IntWritable value, Context context)
        throws IOException, InterruptedException {
    byte[] columnNameAndValue = String.valueOf(value.get()).getBytes();
    context.write(key, new ColumnWritable(columnNameAndValue, columnNameAndValue));
}

From source file: org.apache.crunch.impl.mem.MemPipelineFileReadingWritingIT.java

License: Apache License

@Test
public void testMemPipelineWriteSequenceFile_PTable() throws IOException {
    // write
    final MemTable<Integer, String> collection = new MemTable<Integer, String>(EXPECTED_TABLE, //
            Writables.tableOf(Writables.ints(), Writables.strings()), "test input");
    final Target target = To.sequenceFile(outputFile.toString());
    MemPipeline.getInstance().write(collection, target);

    // read
    final SequenceFile.Reader reader = new Reader(FileSystem.getLocal(baseTmpDir.getDefaultConfiguration()),
            new Path(outputFile.toString()), baseTmpDir.getDefaultConfiguration());
    final List<Pair<Integer, String>> actual = Lists.newArrayList();
    final IntWritable key = new IntWritable();
    final Text value = new Text();
    while (reader.next(key, value)) {
        actual.add(Pair.of(key.get(), value.toString()));
    }
    reader.close();

    // assert read same as written
    assertEquals(EXPECTED_TABLE, actual);
}