Example usage for org.apache.hadoop.io NullWritable get


Introduction

On this page you can find example usages of the org.apache.hadoop.io.NullWritable.get() method.

Prototype

public static NullWritable get() 

Document

Returns the single instance of this class.
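
Because get() returns a singleton, every NullWritable is the same object: instances compare equal and serialize to nothing. A minimal standalone sketch illustrating this (the class name NullWritableDemo is my own, not part of Hadoop):

import org.apache.hadoop.io.NullWritable;

public class NullWritableDemo {
    public static void main(String[] args) {
        NullWritable a = NullWritable.get();
        NullWritable b = NullWritable.get();
        // get() always hands back the same shared instance
        System.out.println(a == b); // true
        // and all instances compare as equal
        System.out.println(a.compareTo(b)); // 0
    }
}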

Usage

From source file: org.apache.jena.hadoop.rdf.mapreduce.NullPlusValueMapper.java

License: Apache License

@Override
protected void map(TKey key, TValue value, Context context) throws IOException, InterruptedException {
    if (this.tracing) {
        LOG.trace("Value = {}", value);
    }
    // Discard the incoming key and re-emit the value under the NullWritable singleton
    context.write(NullWritable.get(), value);
}
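
For context, a job that uses a mapper like this would declare NullWritable as its map output key class. Below is a minimal, self-contained driver sketch; NullKeyJobDriver and PassThroughMapper are hypothetical names, not part of the Jena codebase:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class NullKeyJobDriver {

    // Emits every input line under the shared NullWritable key
    public static class PassThroughMapper extends Mapper<LongWritable, Text, NullWritable, Text> {
        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            context.write(NullWritable.get(), value);
        }
    }

    public static void main(String[] args) throws Exception {
        Job job = Job.getInstance(new Configuration(), "null-key-example");
        job.setJarByClass(NullKeyJobDriver.class);
        job.setMapperClass(PassThroughMapper.class);
        job.setMapOutputKeyClass(NullWritable.class); // the key carries no data on the wire
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(NullWritable.class);
        job.setOutputValueClass(Text.class);
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}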

From source file: org.apache.jena.hadoop.rdf.mapreduce.NullPlusValueReducer.java

License: Apache License

@Override
protected void reduce(TKey key, Iterable<TValue> values, Context context)
        throws IOException, InterruptedException {
    if (this.tracing) {
        LOGGER.trace("Input Key = {}", key);
    }
    // Emit each grouped value under the NullWritable singleton key
    Iterator<TValue> iter = values.iterator();
    while (iter.hasNext()) {
        TValue value = iter.next();
        if (tracing) {
            LOGGER.trace("Input Value = {}", value);
        }
        context.write(NullWritable.get(), value);
    }
}

From source file: org.apache.jena.hadoop.rdf.mapreduce.TestDistinctTriples.java

License: Apache License

@Test
public void distinct_triples_01() throws IOException {
    MapReduceDriver<LongWritable, TripleWritable, TripleWritable, NullWritable, NullWritable, TripleWritable> driver = this
            .getMapReduceDriver();

    Triple t = new Triple(NodeFactory.createURI("urn:s"), NodeFactory.createURI("urn:p"),
            NodeFactory.createLiteral("1"));
    TripleWritable tw = new TripleWritable(t);
    driver.addInput(new LongWritable(1), tw);
    driver.addOutput(NullWritable.get(), tw);

    driver.runTest();
}

From source file: org.apache.jena.hadoop.rdf.mapreduce.TestDistinctTriples.java

License: Apache License

@Test
public void distinct_triples_02() throws IOException {
    MapReduceDriver<LongWritable, TripleWritable, TripleWritable, NullWritable, NullWritable, TripleWritable> driver = this
            .getMapReduceDriver();

    Triple t = new Triple(NodeFactory.createURI("urn:s"), NodeFactory.createURI("urn:p"),
            NodeFactory.createLiteral("1"));
    TripleWritable tw = new TripleWritable(t);
    for (int i = 0; i < 100; i++) {
        driver.addInput(new LongWritable(i), tw);
    }
    driver.addOutput(NullWritable.get(), tw);

    driver.runTest();
}

From source file: org.apache.jena.hadoop.rdf.mapreduce.TestDistinctTriples.java

License: Apache License

@Test
public void distinct_triples_03() throws IOException {
    MapReduceDriver<LongWritable, TripleWritable, TripleWritable, NullWritable, NullWritable, TripleWritable> driver = this
            .getMapReduceDriver();

    Triple t = new Triple(NodeFactory.createURI("urn:s"), NodeFactory.createURI("urn:p"),
            NodeFactory.createLiteral("1"));
    Triple t2 = new Triple(t.getSubject(), t.getPredicate(), NodeFactory.createLiteral("2"));
    Assert.assertNotEquals(t, t2);

    TripleWritable tw = new TripleWritable(t);
    TripleWritable tw2 = new TripleWritable(t2);
    Assert.assertNotEquals(tw, tw2);

    driver.addInput(new LongWritable(1), tw);
    driver.addInput(new LongWritable(2), tw2);
    driver.addOutput(NullWritable.get(), tw);
    driver.addOutput(NullWritable.get(), tw2);

    driver.runTest(false);
}

From source file: org.apache.jena.hadoop.rdf.mapreduce.TestDistinctTriples.java

License: Apache License

@Test
public void distinct_triples_04() throws IOException {
    MapReduceDriver<LongWritable, TripleWritable, TripleWritable, NullWritable, NullWritable, TripleWritable> driver = this
            .getMapReduceDriver();

    Node s1 = NodeFactory.createURI("urn:nf#cbf2b2c7-109e-4097-bbea-f67f272c7fcc");
    Node s2 = NodeFactory.createURI("urn:nf#bb08b75c-1ad2-47ef-acd2-eb2d92b94b89");
    Node p = NodeFactory.createURI("urn:p");
    Node o = NodeFactory.createURI("urn:66.230.159.118");
    Assert.assertNotEquals(s1, s2);

    Triple t1 = new Triple(s1, p, o);
    Triple t2 = new Triple(s2, p, o);
    Assert.assertNotEquals(t1, t2);

    TripleWritable tw1 = new TripleWritable(t1);
    TripleWritable tw2 = new TripleWritable(t2);
    Assert.assertNotEquals(tw1, tw2);
    Assert.assertNotEquals(0, tw1.compareTo(tw2));

    driver.addInput(new LongWritable(1), tw1);
    driver.addInput(new LongWritable(2), tw2);
    driver.addOutput(NullWritable.get(), tw1);
    driver.addOutput(NullWritable.get(), tw2);

    driver.runTest(false);
}

From source file: org.apache.jena.hadoop.rdf.mapreduce.ValuePlusNullMapper.java

License: Apache License

@Override
protected void map(TKey key, TValue value, Context context) throws IOException, InterruptedException {
    if (this.tracing) {
        LOG.trace("Value = {}", value);
    }
    // Inverse of NullPlusValueMapper: emit the value as the key and the NullWritable singleton as the value
    context.write(value, NullWritable.get());
}
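
Emitting (value, NullWritable) pairs like this is the usual first half of a distinct pattern: the shuffle groups duplicate values under a single key, so the companion reducer only has to write each key once. A minimal sketch of such a reducer (DistinctValueReducer is a hypothetical name; Jena's actual distinct implementation may differ):

import java.io.IOException;

import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapreduce.Reducer;

public class DistinctValueReducer<T> extends Reducer<T, NullWritable, T, NullWritable> {
    @Override
    protected void reduce(T key, Iterable<NullWritable> values, Context context)
            throws IOException, InterruptedException {
        // Duplicates arrive grouped under the same key, so a single write de-duplicates them
        context.write(key, NullWritable.get());
    }
}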

From source file: org.apache.jena.tdbloader4.partitioners.InputSampler.java

License: Apache License

private static <K> void writePartitionFile(K[] samples, String indexName, Job job, Configuration conf,
        int numPartitions) throws IOException {
    @SuppressWarnings("unchecked")
    RawComparator<K> comparator = (RawComparator<K>) job.getSortComparator();
    K[] shuffledSamples = reshuffleSamples(samples, indexName, comparator, numPartitions);
    log.debug("Size of permutated samples is {}", shuffledSamples.length);
    Path dst = new Path(TotalOrderPartitioner.getPartitionFile(conf) + "_" + indexName);
    log.debug("Writing to {}", dst);
    FileSystem fs = dst.getFileSystem(conf);
    if (fs.exists(dst)) {
        fs.delete(dst, false);
    }
    SequenceFile.Writer writer = SequenceFile.createWriter(fs, conf, dst, job.getMapOutputKeyClass(),
            NullWritable.class);
    // The NullWritable singleton serves as the placeholder value written alongside each partition key
    NullWritable nullValue = NullWritable.get();
    float stepSize = shuffledSamples.length / (float) numPartitions;
    log.debug("Step size is {}", stepSize);
    int last = -1;
    for (int i = 1; i < numPartitions; ++i) {
        int k = Math.round(stepSize * i);
        while (last >= k && comparator.compare(shuffledSamples[last], shuffledSamples[k]) == 0) {
            ++k;
        }
        log.debug("Writing ({},{})", shuffledSamples[k], nullValue);
        writer.append(shuffledSamples[k], nullValue);
        last = k;
    }
    log.debug("Closing {}", dst);
    writer.close();
}

From source file: org.apache.jena.tdbloader4.partitioners.TotalOrderPartitioner.java

License: Apache License

/**
 * Read the cut points from the given IFile.
 *
 * @param fs The file system
 * @param p The path to read
 * @param keyClass The map output key class
 * @param conf The job configuration
 * @throws IOException
 */
// matching key types enforced by passing in map output key class
@SuppressWarnings("unchecked")
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass, Configuration conf) throws IOException {
    log.debug("readPartitions({}, {}, {}, {})", new Object[] { fs, p, keyClass.getSimpleName(), conf });
    SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, conf);
    log.debug("SequenceFile.Reader is {}", reader);
    log.debug("SequenceFile.Reader position is {}", reader.getPosition());
    ArrayList<K> parts = new ArrayList<K>();
    K key = ReflectionUtils.newInstance(keyClass, conf);
    // Shared placeholder instance; the reader requires a value object, but NullWritable holds no data
    NullWritable value = NullWritable.get();
    while (reader.next(key, value)) {
        log.debug("Partition key {}", key);
        parts.add(key);
        key = ReflectionUtils.newInstance(keyClass, conf);
    }
    reader.close();
    return parts.toArray((K[]) Array.newInstance(keyClass, parts.size()));
}

From source file: org.apache.kudu.mapreduce.ITKuduTableOutputFormat.java

License: Apache License

@Test
public void test() throws Exception {
    createTable(TABLE_NAME, getBasicSchema(), getBasicCreateTableOptions());

    KuduTableOutputFormat output = new KuduTableOutputFormat();
    Configuration conf = new Configuration();
    conf.set(KuduTableOutputFormat.MASTER_ADDRESSES_KEY, getMasterAddresses());
    conf.set(KuduTableOutputFormat.OUTPUT_TABLE_KEY, TABLE_NAME);
    output.setConf(conf);

    String multitonKey = conf.get(KuduTableOutputFormat.MULTITON_KEY);
    KuduTable table = KuduTableOutputFormat.getKuduTable(multitonKey);
    assertNotNull(table);

    Insert insert = table.newInsert();
    PartialRow row = insert.getRow();
    row.addInt(0, 1);
    row.addInt(1, 2);
    row.addInt(2, 3);
    row.addString(3, "a string");
    row.addBoolean(4, true);

    // The record writer keys on NullWritable, so only the Operation value matters
    RecordWriter<NullWritable, Operation> rw = output.getRecordWriter(null);
    rw.write(NullWritable.get(), insert);
    rw.close(null);
    AsyncKuduScanner.AsyncKuduScannerBuilder builder = client.newScannerBuilder(table);
    assertEquals(1, countRowsInScan(builder.build()));
}