Example usage for org.apache.hadoop.mapred JobConf setLong

Introduction

This page collects example usages of org.apache.hadoop.mapred.JobConf.setLong from real-world source files.

Prototype

public void setLong(String name, long value) 

Document

Set the value of the name property to a long.
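
Before the project examples, here is a minimal, self-contained sketch of the setLong/getLong round trip. The property key "example.max.bytes" is purely illustrative and does not come from any of the examples below.

import org.apache.hadoop.mapred.JobConf;

public class SetLongSketch {
    public static void main(String[] args) {
        // new JobConf(false) skips loading the default resources (core-default.xml, etc.)
        JobConf conf = new JobConf(false);

        // Store 64 MB under a hypothetical key; Configuration keeps the value as a string internally
        conf.setLong("example.max.bytes", 64L * 1024 * 1024);

        // getLong returns the stored value, or the supplied default if the key is absent
        long maxBytes = conf.getLong("example.max.bytes", 0L);
        System.out.println(maxBytes); // prints 67108864
    }
}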

Usage

From source file: com.facebook.presto.hive.ParquetRecordWriterUtil.java

License: Apache License

public static RecordWriter createParquetWriter(Path target, JobConf conf, Properties properties,
        boolean compress, ConnectorSession session) throws IOException, ReflectiveOperationException {
    conf.setLong(ParquetOutputFormat.BLOCK_SIZE, getParquetWriterBlockSize(session).toBytes());
    conf.setLong(ParquetOutputFormat.PAGE_SIZE, getParquetWriterPageSize(session).toBytes());

    RecordWriter recordWriter = new MapredParquetOutputFormat().getHiveRecordWriter(conf, target, Text.class,
            compress, properties, Reporter.NULL);

    Object realWriter = REAL_WRITER_FIELD.get(recordWriter);
    Object internalWriter = INTERNAL_WRITER_FIELD.get(realWriter);
    ParquetFileWriter fileWriter = (ParquetFileWriter) FILE_WRITER_FIELD.get(internalWriter);

    return new ExtendedRecordWriter() {
        private long length;

        @Override
        public long getWrittenBytes() {
            return length;
        }

        @Override
        public void write(Writable value) throws IOException {
            recordWriter.write(value);
            length = fileWriter.getPos();
        }

        @Override
        public void close(boolean abort) throws IOException {
            recordWriter.close(abort);
            if (!abort) {
                length = target.getFileSystem(conf).getFileStatus(target).getLen();
            }
        }
    };
}
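
Note how both setLong calls happen before the Hive record writer is constructed: ParquetOutputFormat.BLOCK_SIZE and ParquetOutputFormat.PAGE_SIZE are String configuration keys, and the Parquet writer created inside getHiveRecordWriter reads the block and page sizes back out of the same JobConf. Setting them any later would presumably have no effect on the writer.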

From source file: com.hdfs.concat.crush.CrushOptionParsingTest.java

License: Apache License

@Before
public void before() throws IOException {
    crush = new Crush();

    JobConf job = new JobConf(false);
    crush.setConf(job);

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.setInt("mapred.reduce.tasks", 20);
    job.setLong("dfs.block.size", 1024 * 1024 * 64);

    FileSystem fs = FileSystem.get(job);
    fs.setWorkingDirectory(new Path(tmp.getRoot().getAbsolutePath()));

    crush.setFileSystem(fs);
}
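
Here setLong pins dfs.block.size to 64 MB (1024 * 1024 * 64) in the test fixture, so that the option-parsing code under test sees a deterministic block size instead of whatever the local Hadoop defaults happen to be.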

From source file: com.hdfs.concat.crush.CrushReducerTest.java

License: Apache License

@Before
public void setupReducer() {
    JobConf job = new JobConf(false);

    job.set("mapred.tip.id", "task_201011081200_014527_r_001234");
    job.set("mapred.task.id", "attempt_201011081200_14527_r_001234_0");

    outDir = tmp.newFolder("out");
    tmp.newFolder("out/_temporary");

    job.set("mapred.output.dir", outDir.getAbsolutePath());

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");

    job.setLong("crush.timestamp", 98765);

    job.setInt("crush.num.specs", 3);
    job.set("crush.0.regex", ".+/dir");
    job.set("crush.0.regex.replacement", "firstregex-${crush.timestamp}-${crush.task.num}-${crush.file.num}");
    job.set("crush.0.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.0.output.format", TextOutputFormat.class.getName());

    job.set("crush.1.regex", ".+/dir/([^/]+/)*(.+)");
    job.set("crush.1.regex.replacement",
            "secondregex-$2-${crush.timestamp}-${crush.task.num}-${crush.file.num}");
    job.set("crush.1.input.format", TextInputFormat.class.getName());
    job.set("crush.1.output.format", TextOutputFormat.class.getName());

    job.set("crush.2.regex", ".+/other");
    job.set("crush.2.regex.replacement", "${crush.timestamp}-${crush.task.num}-middle-${crush.file.num}-tail");
    job.set("crush.2.input.format", TextInputFormat.class.getName());
    job.set("crush.2.output.format", SequenceFileOutputFormat.class.getName());

    reducer = new CrushReducer();

    reducer.configure(job);
}
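
The fixture mixes set, setInt, and setLong: crush.timestamp is written with setLong, and its value is presumably what the reducer substitutes for the ${crush.timestamp} placeholder in each regex replacement string above.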

From source file: com.hdfs.concat.crush.CrushReducerTest.java

License: Apache License

@Test
public void missingInputRegex() {
    JobConf job = new JobConf(false);

    job.set("mapred.tip.id", "task_201011081200_14527_r_1234");

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.set("mapred.output.dir", outDir.getAbsolutePath());

    job.setLong("crush.timestamp", 98765);

    job.setLong("dfs.block.size", 1024 * 1024 * 64L);

    job.setInt("crush.num.specs", 2);
    job.set("crush.0.regex", "foo");
    job.set("crush.0.regex.replacement", "bar");
    job.set("crush.0.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.0.output.format", TextOutputFormat.class.getName());

    job.set("crush.1.regex.replacement", "bar");
    job.set("crush.1.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.1.output.format", TextOutputFormat.class.getName());

    reducer = new CrushReducer();

    try {
        reducer.configure(job);
        fail();
    } catch (IllegalArgumentException e) {
        if (!"No input regex: crush.1.regex".equals(e.getMessage())) {
            throw e;
        }
    }
}
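
The remaining tests from this file follow the same template: build a JobConf with the usual setLong calls (crush.timestamp and dfs.block.size), omit or corrupt exactly one crush.1.* property, and assert that CrushReducer.configure fails with the matching IllegalArgumentException message.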

From source file: com.hdfs.concat.crush.CrushReducerTest.java

License: Apache License

@Test
public void missingOutputRegex() {
    JobConf job = new JobConf(false);

    job.set("mapred.tip.id", "task_201011081200_14527_r_1234");

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.set("mapred.output.dir", outDir.getAbsolutePath());

    job.setLong("crush.timestamp", 98765);

    job.setLong("dfs.block.size", 1024 * 1024 * 64L);

    job.setInt("crush.num.specs", 2);
    job.set("crush.0.regex", "foo");
    job.set("crush.0.regex.replacement", "bar");
    job.set("crush.0.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.0.output.format", TextOutputFormat.class.getName());

    job.set("crush.1.regex", "hello");
    job.set("crush.1.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.1.output.format", TextOutputFormat.class.getName());

    reducer = new CrushReducer();

    try {
        reducer.configure(job);
        fail();
    } catch (IllegalArgumentException e) {
        if (!"No output replacement: crush.1.regex.replacement".equals(e.getMessage())) {
            throw e;
        }
    }
}

From source file: com.hdfs.concat.crush.CrushReducerTest.java

License: Apache License

@Test
public void missingInputFormat() {
    JobConf job = new JobConf(false);

    job.set("mapred.tip.id", "task_201011081200_14527_r_1234");

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.set("mapred.output.dir", outDir.getAbsolutePath());

    job.setLong("crush.timestamp", 98765);

    job.setLong("dfs.block.size", 1024 * 1024 * 64L);

    job.setInt("crush.num.specs", 2);
    job.set("crush.0.regex", "foo");
    job.set("crush.0.regex.replacement", "bar");
    job.set("crush.0.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.0.output.format", SequenceFileOutputFormat.class.getName());

    job.set("crush.1.regex", "hello");
    job.set("crush.1.regex.replacement", "hello");
    job.set("crush.1.output.format", SequenceFileOutputFormat.class.getName());

    reducer = new CrushReducer();

    try {
        reducer.configure(job);
        fail();
    } catch (IllegalArgumentException e) {
        if (!"No input format: crush.1.input.format".equals(e.getMessage())) {
            throw e;
        }
    }
}

From source file: com.hdfs.concat.crush.CrushReducerTest.java

License: Apache License

@Test
public void inputFormatWrongType() {
    JobConf job = new JobConf(false);

    job.set("mapred.tip.id", "task_201011081200_14527_r_1234");

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.set("mapred.output.dir", outDir.getAbsolutePath());

    job.setLong("crush.timestamp", 98765);

    job.setLong("dfs.block.size", 1024 * 1024 * 64L);

    job.setInt("crush.num.specs", 2);
    job.set("crush.0.regex", "foo");
    job.set("crush.0.regex.replacement", "bar");
    job.set("crush.0.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.0.output.format", SequenceFileOutputFormat.class.getName());

    job.set("crush.1.regex", "hello");
    job.set("crush.1.regex.replacement", "hello");
    job.set("crush.1.input.format", Object.class.getName());
    job.set("crush.1.output.format", SequenceFileOutputFormat.class.getName());

    reducer = new CrushReducer();

    try {
        reducer.configure(job);
        fail();
    } catch (IllegalArgumentException e) {
        if (!"Not a file input format: crush.1.input.format=java.lang.Object".equals(e.getMessage())) {
            throw e;
        }
    }
}

From source file: com.hdfs.concat.crush.CrushReducerTest.java

License: Apache License

@Test
public void missingOutputFormat() {
    JobConf job = new JobConf(false);

    job.set("mapred.tip.id", "task_201011081200_14527_r_1234");

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.set("mapred.output.dir", outDir.getAbsolutePath());

    job.setLong("crush.timestamp", 98765);

    job.setLong("dfs.block.size", 1024 * 1024 * 64L);

    job.setInt("crush.num.specs", 2);
    job.set("crush.0.regex", "foo");
    job.set("crush.0.regex.replacement", "bar");
    job.set("crush.0.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.0.output.format", SequenceFileOutputFormat.class.getName());

    job.set("crush.1.regex", "hello");
    job.set("crush.1.regex.replacement", "hello");
    job.set("crush.1.input.format", SequenceFileInputFormat.class.getName());

    reducer = new CrushReducer();

    try {
        reducer.configure(job);
        fail();
    } catch (IllegalArgumentException e) {
        if (!"No output format: crush.1.output.format".equals(e.getMessage())) {
            throw e;
        }
    }
}

From source file: com.hdfs.concat.crush.CrushReducerTest.java

License: Apache License

@Test
public void outputFormatWrongType() {
    JobConf job = new JobConf(false);

    job.set("mapred.tip.id", "task_201011081200_14527_r_1234");

    job.set("fs.default.name", "file:///");
    job.set("fs.file.impl", "org.apache.hadoop.fs.LocalFileSystem");
    job.set("mapred.output.dir", outDir.getAbsolutePath());

    job.setLong("crush.timestamp", 98765);

    job.setLong("dfs.block.size", 1024 * 1024 * 64L);

    job.setInt("crush.num.specs", 2);
    job.set("crush.0.regex", "foo");
    job.set("crush.0.regex.replacement", "bar");
    job.set("crush.0.input.format", SequenceFileInputFormat.class.getName());
    job.set("crush.0.output.format", SequenceFileOutputFormat.class.getName());

    job.set("crush.1.regex", "hello");
    job.set("crush.1.regex.replacement", "hello");
    job.set("crush.1.input.format", TextInputFormat.class.getName());
    job.set("crush.1.output.format", Object.class.getName());

    reducer = new CrushReducer();

    try {
        reducer.configure(job);
        fail();
    } catch (IllegalArgumentException e) {
        if (!"Not an output format: crush.1.output.format=java.lang.Object".equals(e.getMessage())) {
            throw e;
        }
    }
}

From source file: com.ibm.bi.dml.runtime.matrix.data.hadoopfix.DelegatingInputFormat.java

License: Apache License

@SuppressWarnings("unchecked")
public RecordReader<K, V> getRecordReader(InputSplit split, JobConf conf, Reporter reporter)
        throws IOException {

    // Find the InputFormat and then the RecordReader from the
    // TaggedInputSplit.

    TaggedInputSplit taggedInputSplit = (TaggedInputSplit) split;
    InputFormat<K, V> inputFormat = (InputFormat<K, V>) ReflectionUtils
            .newInstance(taggedInputSplit.getInputFormatClass(), conf);
    InputSplit inputSplit = taggedInputSplit.getInputSplit();
    if (inputSplit instanceof FileSplit) {
        FileSplit fileSplit = (FileSplit) inputSplit;
        conf.set("map.input.file", fileSplit.getPath().toString());
        conf.setLong("map.input.start", fileSplit.getStart());
        conf.setLong("map.input.length", fileSplit.getLength());
    }

    return inputFormat.getRecordReader(taggedInputSplit.getInputSplit(), conf, reporter);
}