Example usage for org.apache.hadoop.mapred JobConf setJar

List of usage examples for org.apache.hadoop.mapred JobConf setJar

Introduction

On this page you can find example usages of org.apache.hadoop.mapred JobConf.setJar.

Prototype

public void setJar(String jar) 

Document

Set the user jar for the map-reduce job.
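
As a quick illustration before the project examples below, here is a minimal sketch (the jar path and class name are placeholders, not taken from any of the projects listed): setJar points the job at an explicit jar file, while setJarByClass lets Hadoop locate the jar that contains a given class.

public static void configureJobJar(JobConf conf) {
    // Minimal sketch: point the job at an explicit jar on the local filesystem.
    // "/path/to/my-job.jar" is a placeholder path.
    conf.setJar("/path/to/my-job.jar");

    // Alternative: let Hadoop find the jar containing a given class
    // (MyJobDriver is a hypothetical driver class).
    // conf.setJarByClass(MyJobDriver.class);
}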

Usage

From source file: azkaban.jobtype.javautils.HadoopUtils.java

License: Apache License

public static void setClassLoaderAndJar(JobConf conf, Class<?> jobClass) {
    conf.setClassLoader(Thread.currentThread().getContextClassLoader());
    String jar = findContainingJar(jobClass, Thread.currentThread().getContextClassLoader());
    if (jar != null) {
        conf.setJar(jar);
    }
}

From source file: com.cloudera.sqoop.orm.TestParseMethods.java

License: Apache License

public void runParseTest(String fieldTerminator, String lineTerminator, String encloser, String escape,
        boolean encloseRequired) throws IOException {

    ClassLoader prevClassLoader = null;

    String[] argv = getArgv(true, fieldTerminator, lineTerminator, encloser, escape, encloseRequired);
    runImport(argv);
    try {
        String tableClassName = getTableName();

        argv = getArgv(false, fieldTerminator, lineTerminator, encloser, escape, encloseRequired);
        SqoopOptions opts = new ImportTool().parseArguments(argv, null, null, true);

        CompilationManager compileMgr = new CompilationManager(opts);
        String jarFileName = compileMgr.getJarFilename();

        // Make sure the user's class is loaded into our address space.
        prevClassLoader = ClassLoaderStack.addJarFile(jarFileName, tableClassName);

        JobConf job = new JobConf();
        job.setJar(jarFileName);

        // Tell the job what class we're testing.
        job.set(ReparseMapper.USER_TYPE_NAME_KEY, tableClassName);

        // use local mode in the same JVM.
        ConfigurationHelper.setJobtrackerAddr(job, "local");
        if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
            job.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
        }
        String warehouseDir = getWarehouseDir();
        Path warehousePath = new Path(warehouseDir);
        Path inputPath = new Path(warehousePath, getTableName());
        Path outputPath = new Path(warehousePath, getTableName() + "-out");

        job.setMapperClass(ReparseMapper.class);
        job.setNumReduceTasks(0);
        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        JobClient.runJob(job);
    } catch (InvalidOptionsException ioe) {
        fail(ioe.toString());
    } catch (ParseException pe) {
        fail(pe.toString());
    } finally {
        if (null != prevClassLoader) {
            ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
        }
    }
}

From source file: com.cloudera.sqoop.orm.TestParseMethods.java

License: Apache License

public void testFieldSetter() throws IOException {
    ClassLoader prevClassLoader = null;

    String[] types = { "VARCHAR(32)", "VARCHAR(32)" };
    String[] vals = { "'meep'", "'foo'" };
    createTableWithColTypes(types, vals);

    String[] argv = getArgv(true, ",", "\\n", "\\\'", "\\", false);
    runImport(argv);
    try {
        String tableClassName = getTableName();

        argv = getArgv(false, ",", "\\n", "\\\'", "\\", false);
        SqoopOptions opts = new ImportTool().parseArguments(argv, null, null, true);

        CompilationManager compileMgr = new CompilationManager(opts);
        String jarFileName = compileMgr.getJarFilename();

        // Make sure the user's class is loaded into our address space.
        prevClassLoader = ClassLoaderStack.addJarFile(jarFileName, tableClassName);

        JobConf job = new JobConf();
        job.setJar(jarFileName);

        // Tell the job what class we're testing.
        job.set(ExplicitSetMapper.USER_TYPE_NAME_KEY, tableClassName);
        job.set(ExplicitSetMapper.SET_COL_KEY, BASE_COL_NAME + "0");
        job.set(ExplicitSetMapper.SET_VAL_KEY, "this-is-a-test");

        // use local mode in the same JVM.
        ConfigurationHelper.setJobtrackerAddr(job, "local");
        if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
            job.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
        }
        String warehouseDir = getWarehouseDir();
        Path warehousePath = new Path(warehouseDir);
        Path inputPath = new Path(warehousePath, getTableName());
        Path outputPath = new Path(warehousePath, getTableName() + "-out");

        job.setMapperClass(ExplicitSetMapper.class);
        job.setNumReduceTasks(0);
        FileInputFormat.addInputPath(job, inputPath);
        FileOutputFormat.setOutputPath(job, outputPath);

        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(NullWritable.class);

        JobClient.runJob(job);
    } catch (InvalidOptionsException ioe) {
        fail(ioe.toString());
    } catch (ParseException pe) {
        fail(pe.toString());
    } finally {
        if (null != prevClassLoader) {
            ClassLoaderStack.setCurrentClassLoader(prevClassLoader);
        }
    }
}

From source file: com.google.mr4c.hadoop.MR4CMRJob.java

License: Open Source License

public void applyTo(JobConf jobConf) throws IOException {

    // push all mr4c namespaced properties to the job conf
    HadoopUtils.applyToJobConf(m_config.getProperties(), jobConf);

    String jar = getMR4CJar(m_config);
    if (!StringUtils.isEmpty(jar)) {
        jobConf.setJar(jar);
    }

    exportProperty(m_config, jobConf, Category.HADOOP, HadoopConfig.PROP_TASKS, PROP_TASKS);

    String clusterName = getClusterName(m_config);
    if (!StringUtils.isEmpty(clusterName)) {
        Cluster cluster = Clusters.getCluster(clusterName);
        cluster.applyToConfig(jobConf);
    }

    S3Credentials cred = S3Credentials.extractFrom(m_config);
    if (cred != null) {
        cred.applyTo(jobConf);
    }

    // Don't export task and job id's, Hadoop should set those

    if (!m_envVars.isEmpty() || !m_envVarMap.isEmpty()) {
        HadoopUtils.applyEnvironmentVariables(jobConf, m_envVarMap, m_envVars);
    }

}

From source file: com.ibm.jaql.lang.expr.hadoop.NativeMapReduceExpr.java

License: Apache License

private JsonRecord eval_0_0(Configuration conf) throws Exception {

    JobConf job = new JobConf(conf);
    // set the jar if needed
    if (useSessionJarDefault.get()) {
        File jFile = ClassLoaderMgr.getExtensionJar();
        if (jFile != null) {
            job.setJar(jFile.getAbsolutePath());
        } else {
            job.setJarByClass(NativeMapReduceExpr.class);
        }
    }

    // submit the job
    boolean status = true;
    try {
        //JobClient.runJob(job);
        Util.submitJob(new JsonString(NativeMapReduceExpr.class.getName()), job);
    } catch (IOException e) {
        status = false;
        e.printStackTrace();
        LOG.warn("native map-reduce job failed", e);
    }
    // setup the return value
    BufferedJsonRecord ret = new BufferedJsonRecord();
    ret.add(STATUS, (status) ? JsonBool.TRUE : JsonBool.FALSE);

    return ret;
}

From source file: com.liveramp.hank.hadoop.HadoopDomainBuilder.java

License: Apache License

public static void main(String[] args) throws IOException, InvalidConfigurationException {
    CommandLineChecker.check(args,
            new String[] { "domain name", "config path", "jobjar", "input path", "output_path" },
            HadoopDomainBuilder.class);
    String domainName = args[0];
    CoordinatorConfigurator configurator = new YamlCoordinatorConfigurator(args[1]);
    String jobJar = args[2];
    String inputPath = args[3];
    String outputPath = args[4];

    DomainBuilderProperties properties = new DomainBuilderProperties(domainName, configurator)
            .setOutputPath(outputPath);
    JobConf conf = new JobConf();
    conf.setJar(jobJar);
    conf.setJobName(HadoopDomainBuilder.class.getSimpleName() + " Domain " + domainName + ", Output path: "
            + outputPath);
    HadoopDomainBuilder builder = new HadoopDomainBuilder(conf, inputPath, SequenceFileInputFormat.class,
            DomainBuilderMapperDefault.class);
    LOG.info("Building Hank domain " + domainName + " from input " + inputPath
            + " and coordinator configuration " + configurator);
    // TODO: Create DomainVersionProperties
    throw new NotImplementedException("TODO: Create DomainVersionProperties");
    // builder.buildHankDomain(properties, null);
}

From source file: com.liveramp.hank.hadoop.HadoopDomainCompactor.java

License: Apache License

public static void main(String[] args) throws IOException, InvalidConfigurationException {
    CommandLineChecker.check(args, new String[] { "domain name", "version to compact number",
            "mapred.task.timeout", "config path", "jobjar" }, HadoopDomainCompactor.class);
    String domainName = args[0];
    Integer versionToCompactNumber = Integer.valueOf(args[1]);
    Integer mapredTaskTimeout = Integer.valueOf(args[2]);
    CoordinatorConfigurator configurator = new YamlCoordinatorConfigurator(args[3]);
    String jobJar = args[4];

    DomainCompactorProperties properties = new DomainCompactorProperties(domainName, versionToCompactNumber,
            configurator);
    JobConf conf = new JobConf();
    conf.setJar(jobJar);
    conf.set("mapred.task.timeout", mapredTaskTimeout.toString());
    conf.setJobName(HadoopDomainCompactor.class.getSimpleName() + " Domain " + domainName + ", Version "
            + versionToCompactNumber);
    HadoopDomainCompactor compactor = new HadoopDomainCompactor(conf);
    LOG.info("Compacting Hank domain " + domainName + " version " + versionToCompactNumber
            + " with coordinator configuration " + configurator);
    compactor.buildHankDomain(properties,
            new IncrementalDomainVersionProperties.Base("Version " + versionToCompactNumber + " compacted"));
}

From source file: com.rapleaf.hank.hadoop.HadoopDomainCompactor.java

License: Apache License

public static void main(String[] args) throws IOException, InvalidConfigurationException {
    CommandLineChecker.check(args, new String[] { "domain name", "version to compact number",
            "mapred.task.timeout", "config path", "jobjar" }, HadoopDomainCompactor.class);
    String domainName = args[0];
    Integer versionToCompactNumber = Integer.valueOf(args[1]);
    Integer mapredTaskTimeout = Integer.valueOf(args[2]);
    CoordinatorConfigurator configurator = new YamlClientConfigurator(args[3]);
    String jobJar = args[4];

    DomainCompactorProperties properties = new DomainCompactorProperties(domainName, versionToCompactNumber,
            configurator);
    JobConf conf = new JobConf();
    conf.setJar(jobJar);
    conf.set("mapred.task.timeout", mapredTaskTimeout.toString());
    conf.setJobName(HadoopDomainCompactor.class.getSimpleName() + " Domain " + domainName + ", Version "
            + versionToCompactNumber);
    HadoopDomainCompactor compactor = new HadoopDomainCompactor(conf);
    LOG.info("Compacting Hank domain " + domainName + " version " + versionToCompactNumber
            + " with coordinator configuration " + configurator);
    compactor.buildHankDomain(properties,
            new IncrementalDomainVersionProperties.Base("Version " + versionToCompactNumber + " compacted"));
}

From source file: com.xiaoxiaomo.mr.utils.kafka.HadoopJob.java

License: Apache License

public int run(String[] args) throws Exception {
    CommandLineParser parser = new PosixParser();
    Options options = buildOptions();
    CommandLine cmd = parser.parse(options, args);

    if (cmd.hasOption("h") || cmd.getArgs().length == 0) {
        printHelpAndExit(options);
    }

    String hdfsPath = cmd.getArgs()[0];
    Configuration conf = getConf();
    conf.setBoolean("mapred.map.tasks.speculative.execution", false);

    if (cmd.hasOption("topics")) {
        LOG.info("Using topics: " + cmd.getOptionValue("topics"));
        KafkaInputFormat.configureKafkaTopics(conf, cmd.getOptionValue("topics"));
    } else {
        printHelpAndExit(options);
    }

    KafkaInputFormat.configureZkConnection(conf, cmd.getOptionValue("zk-connect", "localhost:2181"));
    if (cmd.hasOption("consumer-group")) {
        CheckpointManager.configureUseZooKeeper(conf,
                cmd.getOptionValue("consumer-group", "dev-hadoop-loader"));
    }

    if (cmd.getOptionValue("autooffset-reset") != null) {
        KafkaInputFormat.configureAutoOffsetReset(conf, cmd.getOptionValue("autooffset-reset"));
    }

    JobConf jobConf = new JobConf(conf);
    if (cmd.hasOption("remote")) {
        String ip = cmd.getOptionValue("remote");
        LOG.info("Default file system: hdfs://" + ip + ":8020/");
        jobConf.set("fs.defaultFS", "hdfs://" + ip + ":8020/");
        LOG.info("Remote jobtracker: " + ip + ":8021");
        jobConf.set("mapred.job.tracker", ip + ":8021");
    }

    Path jarTarget = new Path(
            getClass().getProtectionDomain().getCodeSource().getLocation() + "../kafka-hadoop-loader.jar");

    if (new File(jarTarget.toUri()).exists()) {
        // running from IDE/ as maven
        jobConf.setJar(jarTarget.toUri().getPath());
        LOG.info("Using target jar: " + jarTarget.toString());
    } else {
        // running from jar remotely or locally
        jobConf.setJarByClass(getClass());
        LOG.info("Using parent jar: " + jobConf.getJar());
    }

    Job job = Job.getInstance(jobConf, "kafka.hadoop.loader");

    job.setInputFormatClass(KafkaInputFormat.class);
    job.setMapperClass(HadoopJobMapper.class);
    job.setOutputKeyClass(Text.class);
    job.setOutputValueClass(Text.class);
    job.setOutputFormatClass(MultiOutputFormat.class);
    job.setNumReduceTasks(0);

    MultiOutputFormat.setOutputPath(job, new Path(hdfsPath));
    MultiOutputFormat.setCompressOutput(job, cmd.getOptionValue("compress-output", "on").equals("on"));

    LOG.info("Output hdfs location: {}", hdfsPath);
    LOG.info("Output hdfs compression: {}", MultiOutputFormat.getCompressOutput(job));

    return job.waitForCompletion(true) ? 0 : -1;
}

From source file: datafu.hourglass.test.TestBase.java

License: Apache License

/**
 * Returns a job configuration preconfigured to run against the Hadoop
 * managed by the testcase.
 * @return configuration that works on the testcase Hadoop instance
 */
protected JobConf createJobConf() {
    if (localMR) {
        JobConf conf = new JobConf();
        String jarName = System.getProperty("testjar");
        if (jarName == null) {
            throw new RuntimeException("must set testjar property");
        }
        _log.info("Using jar name: " + jarName);
        conf.setJar(jarName);
        return conf;
    } else {
        return _mrCluster.createJobConf();
    }
}