Example usage for org.apache.hadoop.mapred JobConf get

Introduction

This page lists example usages of org.apache.hadoop.mapred.JobConf.get(String, String), collected from open-source projects.

Prototype

public String get(String name, String defaultValue) 

Document

Get the value of the name property. If no such property exists, defaultValue is returned.
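
Before turning to the project examples, here is a minimal, self-contained sketch of the call itself. The property name and default value below are illustrative only, not taken from any of the sources:

import org.apache.hadoop.mapred.JobConf;

public class JobConfGetExample {
    public static void main(String[] args) {
        JobConf conf = new JobConf();

        // Returns the configured value, or the supplied default when the
        // property is unset, so the result here is never null.
        String tracker = conf.get("mapred.job.tracker", "local");
        System.out.println("mapred.job.tracker = " + tracker);

        // Passing null as the default makes an unset property detectable,
        // a pattern several of the examples below rely on.
        String missing = conf.get("example.unset.property", null);
        System.out.println("unset property resolves to: " + missing);
    }
}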

Usage

From source file:JaqlShell.java

License:Apache License

/**
 * @param mrc
 * @param conf
 * @throws Exception
 */
private static void setupOverride(MiniMRCluster mrc, Configuration conf) throws Exception {
    File overrideDir = new File(System.getProperty("hadoop.conf.override"));
    if (!overrideDir.exists()) {
        overrideDir.mkdirs();
    }

    // write out the JobConf from MiniMR to the override dir
    JobConf jc = mrc.createJobConf();
    conf.set("mapred.job.tracker", jc.get("mapred.job.tracker", null));
    String name = "mapred.job.tracker.info.port";
    String addr = jc.get(name, null);
    if (addr == null) {
        name = "mapred.job.tracker.http.address";
        addr = jc.get(name, null);
    }
    conf.set(name, addr);
    OutputStream outCore = new FileOutputStream(
            overrideDir.getCanonicalPath() + File.separator + "core-default.xml");
    OutputStream outMapred = new FileOutputStream(
            overrideDir.getCanonicalPath() + File.separator + "mapred-default.xml");
    OutputStream outHdfs = new FileOutputStream(
            overrideDir.getCanonicalPath() + File.separator + "hdfs-default.xml");
    conf.writeXml(outCore);
    conf.writeXml(outMapred);
    conf.writeXml(outHdfs);
    outCore.close();
    outMapred.close();
    outHdfs.close();
}

From source file:azkaban.security.HadoopSecurityManager_H_2_0.java

License:Apache License

private Text getMRTokenRenewerInternal(JobConf jobConf) throws IOException {
    // Taken from Oozie
    //
    // Getting renewer correctly for JT principal also though JT in hadoop
    // 1.x does not have
    // support for renewing/cancelling tokens
    String servicePrincipal = jobConf.get(RM_PRINCIPAL, jobConf.get(JT_PRINCIPAL));
    Text renewer;
    if (servicePrincipal != null) {
        String target = jobConf.get(HADOOP_YARN_RM, jobConf.get(HADOOP_JOB_TRACKER_2));
        if (target == null) {
            target = jobConf.get(HADOOP_JOB_TRACKER);
        }

        String addr = NetUtils.createSocketAddr(target).getHostName();
        renewer = new Text(SecurityUtil.getServerPrincipal(servicePrincipal, addr));
    } else {
        // No security
        renewer = DEFAULT_RENEWER;
    }

    return renewer;
}
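
The snippet above nests one lookup inside another, so the default of the first property is the value of a second. A minimal sketch of that fallback idiom, using hypothetical property names rather than Azkaban's constants:

import org.apache.hadoop.mapred.JobConf;

public class ChainedDefaultExample {
    public static void main(String[] args) {
        JobConf conf = new JobConf();
        conf.set("cluster.address.legacy", "host-a:8021");

        // Prefer the new property, fall back to the legacy one, and only
        // use the hard-coded default when neither is set.
        String addr = conf.get("cluster.address",
                conf.get("cluster.address.legacy", "localhost:8021"));
        System.out.println("resolved address: " + addr); // prints host-a:8021
    }
}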

From source file:cascading.flow.hadoop.FlowMapper.java

License:Open Source License

@Override
public void configure(JobConf jobConf) {
    try {
        HadoopUtil.initLog4j(jobConf);

        LOG.info("cascading version: {}", jobConf.get("cascading.version", ""));
        LOG.info("child jvm opts: {}", jobConf.get("mapred.child.java.opts", ""));

        currentProcess = new HadoopFlowProcess(new FlowSession(), jobConf, true);

        String mapNodeState = jobConf.getRaw("cascading.flow.step.node.map");

        if (mapNodeState == null)
            mapNodeState = readStateFromDistCache(jobConf, jobConf.get(FlowStep.CASCADING_FLOW_STEP_ID), "map");

        flowNode = deserializeBase64(mapNodeState, jobConf, BaseFlowNode.class);

        LOG.info("flow node id: {}, ordinal: {}", flowNode.getID(), flowNode.getOrdinal());

        Tap source = Flows.getTapForID(flowNode.getSourceTaps(), jobConf.get("cascading.step.source"));

        streamGraph = new HadoopMapStreamGraph(currentProcess, flowNode, source);

        for (Duct head : streamGraph.getHeads())
            LOG.info("sourcing from: " + ((ElementDuct) head).getFlowElement());

        for (Duct tail : streamGraph.getTails())
            LOG.info("sinking to: " + ((ElementDuct) tail).getFlowElement());

        for (Tap trap : flowNode.getTraps())
            LOG.info("trapping to: " + trap);

        logMemory(LOG, "flow node id: " + flowNode.getID() + ", mem on start");
    } catch (Throwable throwable) {
        reportIfLocal(throwable);

        if (throwable instanceof CascadingException)
            throw (CascadingException) throwable;

        throw new FlowException("internal error during mapper configuration", throwable);
    }
}

From source file:cascading.flow.hadoop.FlowReducer.java

License:Open Source License

@Override
public void configure(JobConf jobConf) {
    try {
        super.configure(jobConf);
        HadoopUtil.initLog4j(jobConf);

        LOG.info("cascading version: {}", jobConf.get("cascading.version", ""));
        LOG.info("child jvm opts: {}", jobConf.get("mapred.child.java.opts", ""));

        currentProcess = new HadoopFlowProcess(new FlowSession(), jobConf, false);

        timedIterators = TimedIterator.iterators(new TimedIterator<Tuple>(currentProcess,
                SliceCounters.Read_Duration, SliceCounters.Tuples_Read));

        String reduceNodeState = jobConf.getRaw("cascading.flow.step.node.reduce");

        if (reduceNodeState == null)
            reduceNodeState = readStateFromDistCache(jobConf, jobConf.get(FlowStep.CASCADING_FLOW_STEP_ID),
                    "reduce");

        flowNode = deserializeBase64(reduceNodeState, jobConf, BaseFlowNode.class);

        LOG.info("flow node id: {}, ordinal: {}", flowNode.getID(), flowNode.getOrdinal());

        streamGraph = new HadoopReduceStreamGraph(currentProcess, flowNode,
                Util.getFirst(flowNode.getSourceElements()));

        group = (HadoopGroupGate) streamGraph.getHeads().iterator().next();

        for (Duct head : streamGraph.getHeads())
            LOG.info("sourcing from: " + ((ElementDuct) head).getFlowElement());

        for (Duct tail : streamGraph.getTails())
            LOG.info("sinking to: " + ((ElementDuct) tail).getFlowElement());

        for (Tap trap : flowNode.getTraps())
            LOG.info("trapping to: " + trap);

        logMemory(LOG, "flow node id: " + flowNode.getID() + ", mem on start");
    } catch (Throwable throwable) {
        reportIfLocal(throwable);

        if (throwable instanceof CascadingException)
            throw (CascadingException) throwable;

        throw new FlowException("internal error during reducer configuration", throwable);
    }
}

From source file:cascading.flow.hadoop.HadoopUtil.java

License:Open Source License

public static void initLog4j(JobConf jobConf) {
    String values = jobConf.get("log4j.logger", null);

    if (values == null || values.length() == 0)
        return;

    String[] elements = values.split(",");

    for (String element : elements) {
        String[] logger = element.split("=");

        Logger.getLogger(logger[0]).setLevel(Level.toLevel(logger[1]));
    }
}
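
As the parsing loop suggests, the "log4j.logger" value is expected to be a comma-separated list of logger=level pairs; for example, a setting such as log4j.logger=cascading=DEBUG,org.apache.hadoop=WARN (an illustrative value, not one from the sources) would set those two loggers to the given levels.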

From source file:cascading.hcatalog.CascadingHCatUtil.java

License:Apache License

protected static List<String> getFilesInHivePartition(Partition part, JobConf jobConf) {
    List<String> result = newArrayList();

    String ignoreFileRegex = jobConf.get(HCatTap.IGNORE_FILE_IN_PARTITION_REGEX, "");
    Pattern ignoreFilePattern = Pattern.compile(ignoreFileRegex);

    try {
        Path partitionDirPath = new Path(part.getSd().getLocation());
        FileStatus[] partitionContent = partitionDirPath.getFileSystem(jobConf).listStatus(partitionDirPath);
        for (FileStatus currStatus : partitionContent) {
            if (!currStatus.isDir()) {
                if (!ignoreFilePattern.matcher(currStatus.getPath().getName()).matches()) {
                    result.add(currStatus.getPath().toUri().getPath());
                } else {
                    LOG.debug("Ignoring path {} since matches ignore regex {}",
                            currStatus.getPath().toUri().getPath(), ignoreFileRegex);
                }
            }
        }

    } catch (IOException e) {
        logError("Unable to read the content of partition '" + part.getSd().getLocation() + "'", e);
    }

    return result;
}

From source file:cascading.tap.Dfs.java

License:Open Source License

@Override
protected FileSystem getDefaultFileSystem(JobConf jobConf) throws IOException {
    String name = jobConf.get("fs.default.name", "hdfs://localhost:5001/");

    if (name.equals("local") || name.matches(".*://.*") && !name.startsWith("hdfs://"))
        name = "hdfs://localhost:5001/";
    else if (name.indexOf('/') == -1)
        name = "hdfs://" + name;

    return FileSystem.get(URI.create(name), jobConf);
}

From source file:cascading.tap.hadoop.TapCollector.java

License:Open Source License

/**
 * Constructor TapCollector creates a new TapCollector instance.
 *
 * @param tap    of type Tap
 * @param prefix of type String
 * @param conf   of type JobConf
 * @throws IOException when fails to initialize
 */
public TapCollector(Tap tap, String prefix, JobConf conf) throws IOException {
    this.tap = tap;
    this.prefix = prefix == null || prefix.length() == 0 ? null : prefix;
    this.conf = new JobConf(conf);
    this.outputEntry = new TupleEntry(tap.getSinkFields());
    this.filenamePattern = conf.get("cascading.tapcollector.partname", this.filenamePattern);

    initalize();
}

From source file:cascading.tap.Hfs.java

License:Open Source License

private void makeLocal(JobConf conf, Path qualifiedPath, String infoMessage) {
    if (!conf.get("mapred.job.tracker", "").equalsIgnoreCase("local")
            && qualifiedPath.toUri().getScheme().equalsIgnoreCase("file")) {
        if (LOG.isInfoEnabled())
            LOG.info(infoMessage + toString());

        conf.set("mapred.job.tracker", "local"); // force job to run locally
    }
}

From source file:cascading.tuple.hadoop.TupleSerialization.java

License:Open Source License

static String getSerializations(JobConf jobConf) {
    return jobConf.get("io.serializations", "");
}