Example usage for org.apache.hadoop.mapred JobConf get

Introduction

This page collects example usages of org.apache.hadoop.mapred.JobConf.get from open source projects.

Prototype

public String get(String name) 

Document

Get the value of the name property, or null if no such property exists.
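
A minimal sketch of this null-return contract; the property name my.example.threshold and the fallback value 10 are illustrative, not taken from the examples below:

JobConf conf = new JobConf();
// get(name) returns null when the property is unset, so guard before parsing,
// or use the two-argument overload conf.get(name, defaultValue) to supply a fallback.
String raw = conf.get("my.example.threshold");
int threshold = (raw == null) ? 10 : Integer.parseInt(raw);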

Usage

From source file:hitune.analysis.mapreduce.processor.SystemLog.java

License:Apache License
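
Here JobConf.get resolves the map output key/value class names and the report output paths from configuration properties before the job is submitted.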

@Override
public void run() {

    long timestamp = System.currentTimeMillis();
    JobConf conf = new JobConf(this.conf, SystemLog.class);

    try {
        conf.setJobName(this.getClass().getSimpleName() + timestamp);

        conf.setInputFormat(MultiSequenceFileInputFormat.class);
        conf.setMapperClass(SystemLog.MapClass.class);
        conf.setReducerClass(SystemLog.ReduceClass.class);

        Class<? extends WritableComparable> outputKeyClass = Class
                .forName(conf.get(AnalysisProcessorConfiguration.mapoutputKeyClass))
                .asSubclass(WritableComparable.class);
        Class<? extends Writable> outputValueClass = Class
                .forName(conf.get(AnalysisProcessorConfiguration.mapoutputValueClass))
                .asSubclass(Writable.class);
        conf.setMapOutputKeyClass(outputKeyClass);
        conf.setMapOutputValueClass(outputValueClass);

        conf.setOutputKeyClass(Text.class);

        conf.setOutputValueClass(TextArrayWritable.class);
        conf.setOutputFormat(CSVFileOutputFormat.class);

        String outputPaths = conf.get(AnalysisProcessorConfiguration.reportfolder) + "/"
                + conf.get(AnalysisProcessorConfiguration.reportfile);
        String temp_outputPaths = getTempOutputDir(outputPaths);

        if (this.inputfiles != null) {
            log.debug("inputPaths:" + inputfiles);
            FileInputFormat.setInputPaths(conf, inputfiles);
            FileOutputFormat.setOutputPath(conf, new Path(temp_outputPaths));
            try {
                JobClient.runJob(conf);
                moveResults(conf, outputPaths, temp_outputPaths);
            } catch (IOException e) {
                log.warn("For " + getOutputFileName() + " :JOB fails!");
                log.warn(e);
                e.printStackTrace();
                this.MOVE_DONE = false;
            }

        } else {
            log.warn("For " + getOutputFileName() + " :No input path!");

        }

    } catch (Exception e) {
        log.warn("Job preparation failure!");
        log.warn(e);
        e.printStackTrace();
    }

}

From source file:hivemall.ftvec.amplify.RandomAmplifierUDTF.java

License:Open Source License
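
Here the amplifier's random seed is read from the job configuration; a null return means no seed was configured.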

@Override
public void configure(MapredContext mapredContext) {
    JobConf jobconf = mapredContext.getJobConf();
    String seed = jobconf.get(HivemallConstants.CONFKEY_RAND_AMPLIFY_SEED);
    this.useSeed = (seed != null);
    if (useSeed) {
        this.seed = Long.parseLong(seed);
    }
}

From source file:hivemall.smile.tools.TreePredictUDF.java

License:Apache License
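
Here the mere presence of the td.jar.version property acts as a feature flag that disables JavaScript evaluation.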

@Override
public void configure(MapredContext context) {
    super.configure(context);

    if (context != null) {
        JobConf conf = context.getJobConf();
        String tdJarVersion = conf.get("td.jar.version");
        if (tdJarVersion != null) {
            this.support_javascript_eval = false;
        }
    }
}

From source file:hivemall.smile.utils.SmileTaskExecutor.java

License:Apache License
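
Here the executor's thread count is resolved from different properties depending on whether td.jar.version is set, with defaults applied when get returns null.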

public SmileTaskExecutor(@Nullable MapredContext mapredContext) {
    int nprocs = Runtime.getRuntime().availableProcessors();
    int threads = Math.max(1, nprocs - 1);

    if (mapredContext != null) {
        JobConf conf = mapredContext.getJobConf();
        if (conf != null) {
            String tdJarVersion = conf.get("td.jar.version");
            if (tdJarVersion == null) {
                String hivemallNprocs = conf.get("hivemall.smile.nprocs");
                threads = Primitives.parseInt(hivemallNprocs, threads);
            } else {
                String tdHivemallNprocs = conf.get("td.hivemall.smile.nprocs");
                // invokes in the caller's thread if `td.hivemall.smile.nprocs` is not set
                threads = Primitives.parseInt(tdHivemallNprocs, 1);
            }
        }
    }

    if (threads > 1) {
        logger.info("Initialized FixedThreadPool of " + threads + " threads");
        this.exec = ExecutorFactory.newFixedThreadPool(threads, "Hivemall-SMILE", true);
    } else {
        logger.info("Direct execution in a caller thread is selected");
        this.exec = null;
    }
}

From source file:hivemall.utils.hadoop.HadoopUtils.java

License:Open Source License
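
Here the job ID is resolved by probing a chain of property names, each consulted only when the previous get returned null.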

@Nonnull
public static String getJobId() {
    MapredContext ctx = MapredContextAccessor.get();
    if (ctx == null) {
        throw new IllegalStateException("MapredContext is not set");
    }
    JobConf conf = ctx.getJobConf();
    if (conf == null) {
        throw new IllegalStateException("JobConf is not set");
    }
    String jobId = conf.get("mapred.job.id");
    if (jobId == null) {
        jobId = conf.get("mapreduce.job.id");
        if (jobId == null) {
            String queryId = conf.get("hive.query.id");
            if (queryId != null) {
                return queryId;
            }
            String taskidStr = conf.get("mapred.task.id");
            if (taskidStr == null) {
                throw new IllegalStateException("Cannot resolve jobId: " + toString(conf));
            }
            jobId = getJobIdFromTaskId(taskidStr);
        }
    }
    return jobId;
}

From source file:hydrograph.engine.hadoop.inputformat.TupleMemoryInputFormat.java

License:Apache License
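
Here the tuple count is parsed from the first colon-separated field of the property value, with a count of 0 when the property is unset.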

public static long retrieveNumTuples(JobConf conf, String key) {
    String s = conf.get(key);
    if (s == null) {
        return 0;
    }

    String[] pieces = s.split(":");
    return Long.parseLong(pieces[0]);
}

From source file:hydrograph.engine.hadoop.inputformat.TupleMemoryInputFormat.java

License:Apache License
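
Here a serialized ITupleGenerator is deserialized from the second colon-separated field of the property value.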

public static ITupleGenerator retrieveTupleGenerator(JobConf conf, String key) {
    String s = conf.get(key);
    if (s == null) {
        return null;
    }

    String[] pieces = s.split(":");

    byte[] val;

    if (pieces.length > 1) {
        val = decodeBytes(pieces[1]);
    } else {
        val = new byte[0];
    }

    ByteArrayInputStream stream = new ByteArrayInputStream(val);
    ObjectInputStream in;

    ITupleGenerator tupleGenerator;
    try {
        in = new ObjectInputStream(stream);
        tupleGenerator = (ITupleGenerator) in.readObject();
        in.close();
    } catch (IOException | ClassNotFoundException e) {
        throw new RuntimeException(e);
    }

    return tupleGenerator;
}

From source file:hydrograph.engine.hadoop.recordreader.DelimitedAndFixedWidthRecordReader.java

License:Apache License
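
Here record-format settings such as field lengths, delimiters, quote character, and charset are pulled from the configuration when the reader is constructed.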

public DelimitedAndFixedWidthRecordReader(JobConf conf, FileSplit split) throws IOException {
    lengthsAndDelimiters = DelimitedAndFixedWidthHelper
            .modifyIdentifier(DelimitedAndFixedWidthHelper.stringToArray(conf.get("lengthsAndDelimiters")));
    lengthsAndDelimitersType = conf.getStrings("lengthsAndDelimitersType");
    quote = conf.get("quote");
    charsetName = conf.get("charsetName");
    start = split.getStart();
    pos = start;
    end = start + split.getLength();
    file = split.getPath();
    fs = file.getFileSystem(conf);
    fileIn = fs.open(split.getPath());
    fileIn.seek(start);
    inputStreamReader = new InputStreamReader(fileIn, charsetName);
    singleChar = new char[1];
    stringBuilder = new StringBuilder();
    isQuotePresent = isQuotePresent(quote);
}

From source file:hydrograph.engine.spark.recordreader.DelimitedAndFixedWidthRecordReader.java

License:Apache License
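
Here the near-identical Spark variant of the reader fetches the same format settings, splitting them on Constants.LENGTHS_AND_DELIMITERS_SEPARATOR.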

public DelimitedAndFixedWidthRecordReader(JobConf conf, FileSplit split) throws IOException {
    lengthsAndDelimiters = DelimitedAndFixedWidthHelper.modifyIdentifier(
            conf.get("lengthsAndDelimiters").split(Constants.LENGTHS_AND_DELIMITERS_SEPARATOR));
    lengthsAndDelimitersType = conf.get("lengthsAndDelimitersType")
            .split(Constants.LENGTHS_AND_DELIMITERS_SEPARATOR);
    quote = conf.get("quote");
    charsetName = conf.get("charsetName");
    start = split.getStart();
    pos = start;
    end = start + split.getLength();
    file = split.getPath();
    fs = file.getFileSystem(conf);
    fileIn = fs.open(split.getPath());
    fileIn.seek(start);
    inputStreamReader = new InputStreamReader(fileIn, charsetName);
    singleChar = new char[1];
    stringBuilder = new StringBuilder();
    isQuotePresent = isQuotePresent(quote);
}

From source file:IndexService.IndexIFormatOutputWriter.java

License:Open Source License
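
Here the data file type obtained via get is recorded in the output file's user-defined header info alongside the index file map.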

public IndexIFormatOutputWriter(String fileName, JobConf job) throws IOException {
    this.conf = job;
    ifdf = new IFormatDataFile(job);
    ihead = new IHead();
    String[] fieldStrings = job.getStrings(ConstVar.HD_fieldMap);
    IFieldMap fieldMap = new IFieldMap();
    for (int i = 0; i < fieldStrings.length; i++) {
        String[] def = fieldStrings[i].split(ConstVar.RecordSplit);
        byte type = Byte.valueOf(def[0]);
        int index = Short.valueOf(def[1]);
        fieldMap.addFieldType(new IRecord.IFType(type, index));
    }
    ihead.setFieldMap(fieldMap);

    String[] files = job.getStrings(ConstVar.HD_index_filemap);
    IUserDefinedHeadInfo iudhi = new IUserDefinedHeadInfo();
    iudhi.addInfo(123456, job.get("datafiletype"));
    for (int i = 0; i < files.length; i++) {
        iudhi.addInfo(i, files[i]);
    }
    ihead.setUdi(iudhi);
    ihead.setPrimaryIndex(0);
    ifdf.create(fileName, ihead);
    record = ifdf.getIRecordObj();
}