Example usage for org.apache.hadoop.conf Configuration getClassLoader

List of usage examples for org.apache.hadoop.conf Configuration getClassLoader

Introduction

On this page you can find example usage for org.apache.hadoop.conf Configuration getClassLoader.

Prototype

public ClassLoader getClassLoader() 

Document

Get the ClassLoader for this job.
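
Below is a minimal, self-contained sketch of calling getClassLoader() on a Configuration; the resource name it looks up is a hypothetical example.

import java.io.InputStream;

import org.apache.hadoop.conf.Configuration;

public class GetClassLoaderExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Typically this is the thread context class loader, falling back to
        // the class loader that loaded the Configuration class itself.
        ClassLoader loader = conf.getClassLoader();
        // Use the job's class loader to look up a classpath resource
        // (the resource name below is hypothetical).
        try (InputStream in = loader.getResourceAsStream("my-app-settings.xml")) {
            System.out.println("resource found: " + (in != null));
        }
    }
}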

Usage

From source file:org.apache.avro.hadoop.io.AvroSerialization.java

License:Apache License

private static GenericData newDataModelInstance(Class<? extends GenericData> modelClass, Configuration conf) {
    GenericData dataModel;
    try {
        Constructor<? extends GenericData> ctor = modelClass.getDeclaredConstructor(ClassLoader.class);
        ctor.setAccessible(true);
        dataModel = ctor.newInstance(conf.getClassLoader());
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    ReflectionUtils.setConf(dataModel, conf);
    return dataModel;
}

From source file:org.apache.giraph.hive.common.HiveUtils.java

License:Apache License

/**
 * Add a file to Configuration tmpfiles from ClassLoader resource
 *
 * @param conf Configuration
 * @param name file name
 * @return true if file found in class loader, false otherwise
 */
private static boolean addToHiveFromClassLoader(Configuration conf, String name) {
    URL url = conf.getClassLoader().getResource(name);
    if (url == null) {
        return false;
    }
    if (LOG.isInfoEnabled()) {
        LOG.info("addToHiveFromClassLoader: Adding " + name + " at " + url + " to Configuration tmpfiles");
    }
    addToStringCollection(conf, "tmpfiles", url.toString());
    return true;
}

From source file:org.apache.giraph.io.hcatalog.HCatGiraphRunner.java

License:Apache License

/**
* set hive configuration
* @param conf Configuration argument
*/
private static void adjustConfigurationForHive(Configuration conf) {
    // when output partitions are used, workers register them to the
    // metastore at the cleanup stage, and on HiveConf's initialization it
    // looks for hive-site.xml on the classpath.
    addToStringCollection(conf, "tmpfiles", conf.getClassLoader().getResource("hive-site.xml").toString());

    // Also, you need hive.aux.jars as well
    // addToStringCollection(conf, "tmpjars",
    // conf.getStringCollection("hive.aux.jars.path"));

    // Or, more effectively, we can provide all the jars the client needs
    // to the workers as well
    String[] hadoopJars = System.getenv("HADOOP_CLASSPATH").split(File.pathSeparator);
    List<String> hadoopJarURLs = Lists.newArrayList();
    for (String jarPath : hadoopJars) {
        File file = new File(jarPath);
        if (file.exists() && file.isFile()) {
            String jarURL = file.toURI().toString();
            hadoopJarURLs.add(jarURL);
        }
    }
    addToStringCollection(conf, "tmpjars", hadoopJarURLs);
}

From source file:org.apache.hama.pipes.util.DistributedCacheUtil.java

License:Apache License

/**
 * Add the JARs from the given HDFS paths to the Classpath
 *
 * @param conf The job's configuration
 * @return the combined classpath URLs, or null if the configuration has no tmpjars
 */
public static URL[] addJarsToJobClasspath(Configuration conf) {
    URL[] classLoaderURLs = ((URLClassLoader) conf.getClassLoader()).getURLs();
    String files = conf.get("tmpjars", "");

    if (!files.isEmpty()) {
        String[] fileArr = files.split(",");
        URL[] libjars = new URL[fileArr.length + classLoaderURLs.length];

        for (int i = 0; i < fileArr.length; i++) {
            String tmp = fileArr[i];

            URI pathURI;
            try {
                pathURI = new URI(tmp);
            } catch (URISyntaxException e) {
                throw new IllegalArgumentException(e);
            }

            try {
                FileSystem hdfs = FileSystem.get(conf);
                Path pathSrc = new Path(pathURI.getPath());
                // LOG.info("pathSrc: " + pathSrc);

                if (hdfs.exists(pathSrc)) {
                    LocalFileSystem local = LocalFileSystem.getLocal(conf);

                    // File dst = File.createTempFile(pathSrc.getName() + "-", ".jar");
                    Path pathDst = new Path(local.getWorkingDirectory(), pathSrc.getName());

                    LOG.debug("copyToLocalFile: " + pathDst);
                    hdfs.copyToLocalFile(pathSrc, pathDst);
                    local.deleteOnExit(pathDst);

                    libjars[i] = new URL(local.makeQualified(pathDst).toString());
                }

            } catch (IOException ex) {
                throw new RuntimeException("Error setting up classpath", ex);
            }
        }

        // Add old classLoader entries
        int index = fileArr.length;
        for (int i = 0; i < classLoaderURLs.length; i++) {
            libjars[index] = classLoaderURLs[i];
            index++;
        }

        // Set classloader in current conf/thread
        conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));

        Thread.currentThread().setContextClassLoader(
                new URLClassLoader(libjars, Thread.currentThread().getContextClassLoader()));

        // URL[] urls = ((URLClassLoader) conf.getClassLoader()).getURLs();
        // for (URL u : urls)
        // LOG.info("newClassLoader: " + u.getPath());

        // Set tmpjars
        // hdfs to local path
        String jars = "";
        for (int i = 0; i < fileArr.length; i++) {
            URL url = libjars[i];
            if (jars.length() > 0) {
                jars += ",";
            }
            jars += url.toString();
        }
        conf.set("tmpjars", jars);

        return libjars;
    }
    return null;
}
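
Below is a hedged sketch of how this helper might be invoked before submitting a job; the HDFS jar path is a hypothetical example, and the usual Hadoop and java.net imports are assumed.

// Illustrative fragment only: the jar path below is hypothetical.
Configuration conf = new Configuration();
conf.set("tmpjars", "hdfs:///user/alice/lib/custom-partitioner.jar");

URL[] jobClasspath = DistributedCacheUtil.addJarsToJobClasspath(conf);
if (jobClasspath != null) {
    for (URL entry : jobClasspath) {
        System.out.println("job classpath entry: " + entry);
    }
}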

From source file:org.apache.lens.server.api.query.DriverSelectorQueryContext.java

License:Apache License

/**
 * Gets the driver query conf.
 *
 * @param driver    the driver
 * @param queryConf the query conf
 * @return the final query conf
 */
private Configuration mergeConf(LensDriver driver, Configuration queryConf) {
    Configuration conf = new Configuration(driver.getConf());
    for (Map.Entry<String, String> entry : queryConf) {
        conf.set(entry.getKey(), entry.getValue());
    }
    conf.setClassLoader(queryConf.getClassLoader());
    return conf;
}

From source file:org.apache.mrql.Plan.java

License:Apache License

/** retrieve the compiled functional argument of code */
final static Function functional_argument(Configuration conf, Tree code) {
    Node n = (Node) code;
    if (n.name().equals("compiled"))
        try {
            // if the client has not received the jar file with the compiled arguments, copy the file from HDFS
            if (Compiler.jar_path == null) {
                Path hdfs_path = new Path(conf.get("mrql.jar.path"));
                String local_path = Compiler.tmp_dir + "/mrql_args_" + random_generator.nextInt(1000000)
                        + ".jar";
                FileSystem fs = hdfs_path.getFileSystem(conf);
                fs.copyToLocalFile(false, hdfs_path, new Path("file://" + local_path));
                Compiler.jar_path = local_path;
            }
            return Compiler.compiled(conf.getClassLoader(), n.children().nth(0).toString());
        } catch (Exception ex) {
            System.err.println("*** Warning: Unable to retrieve the compiled lambda: " + code);
            return ((Lambda) Interpreter.evalE(n.children().nth(1))).lambda();
        }
    else if (code.equals(Interpreter.identity_mapper))
        return new Function() {
            public MRData eval(final MRData x) {
                return new Bag(x);
            }
        };
    else
        return ((Lambda) Interpreter.evalE(code)).lambda();
}

From source file:org.apache.parquet.hadoop.util.SerializationUtil.java

License:Apache License

/**
 * Reads an object (that was written using
 * {@link #writeObjectToConfAsBase64}) from a configuration
 *
 * @param key for the configuration
 * @param conf to read from
 * @return the read object, or null if key is not present in conf
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> T readObjectFromConfAsBase64(String key, Configuration conf) throws IOException {
    String b64 = conf.get(key);
    if (b64 == null) {
        return null;
    }

    byte[] bytes = Base64.decodeBase64(b64.getBytes("UTF-8"));

    ByteArrayInputStream bais = null;
    GZIPInputStream gis = null;
    ObjectInputStream ois = null;

    try {
        bais = new ByteArrayInputStream(bytes);
        gis = new GZIPInputStream(bais);
        ois = new ObjectInputStream(gis);
        try {
            return (T) ois.readObject();
        } catch (ClassNotFoundException e) {
            ClassLoaderObjectInputStream clois = new ClassLoaderObjectInputStream(conf.getClassLoader(), gis);
            return (T) clois.readObject();
        }
    } catch (ClassNotFoundException e) {
        throw new IOException("Could not read object from config with key " + key, e);
    } catch (ClassCastException e) {
        throw new IOException("Couldn't cast object read from config with key " + key, e);
    } finally {
        Closeables.close(ois);
        Closeables.close(gis);
        Closeables.close(bais);
    }
}
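
For context, a hedged usage sketch of the reader above; the configuration key and the MyJobSettings class are illustrative assumptions, and the caller is assumed to handle the declared IOException.

// Hypothetical key and type: a Serializable MyJobSettings instance is assumed to
// have been stored under this key earlier (e.g. with writeObjectToConfAsBase64).
MyJobSettings settings = SerializationUtil.readObjectFromConfAsBase64("example.job.settings", conf);
if (settings == null) {
    // the key was not present in the configuration
}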

From source file:org.apache.pig.backend.hadoop.executionengine.HExecutionEngine.java

License:Apache License

public JobConf getExecConf(Properties properties) throws ExecException {
    JobConf jc = null;
    // Check existence of user provided configs
    String isHadoopConfigsOverriden = properties.getProperty("pig.use.overriden.hadoop.configs");
    if (isHadoopConfigsOverriden != null && isHadoopConfigsOverriden.equals("true")) {
        jc = new JobConf(ConfigurationUtil.toConfiguration(properties));
    } else {
        // Check existence of hadoop-site.xml or core-site.xml in
        // classpath if user provided confs are not being used
        Configuration testConf = new Configuration();
        ClassLoader cl = testConf.getClassLoader();
        URL hadoop_site = cl.getResource(HADOOP_SITE);
        URL core_site = cl.getResource(CORE_SITE);

        if (hadoop_site == null && core_site == null) {
            throw new ExecException(
                    "Cannot find hadoop configurations in classpath "
                            + "(neither hadoop-site.xml nor core-site.xml was found in the classpath)."
                            + " If you plan to use local mode, please put -x local option in command line",
                    4010);
        }
        jc = new JobConf();
    }
    jc.addResource("pig-cluster-hadoop-site.xml");
    jc.addResource(YARN_SITE);
    return jc;
}

From source file:org.apache.pig.backend.hadoop.executionengine.MRExecutionEngine.java

License:Apache License

@SuppressWarnings({ "deprecation", "resource" })
private void init(Properties properties) throws ExecException {
    // First set the ssh socket factory
    setSSHFactory();

    String cluster = null;
    String nameNode = null;

    // We need to build a configuration object first in the manner described
    // below and then get back a properties object to inspect the
    // JOB_TRACKER_LOCATION and FILE_SYSTEM_LOCATION. The reason to do this
    // is if we looked only at the existing properties object, we may not
    // get the right settings. So we want to read the configurations in the
    // order specified below and only then look for JOB_TRACKER_LOCATION
    // and FILE_SYSTEM_LOCATION.

    // Hadoop by default specifies two resources, loaded in-order from the
    // classpath:
    // 1. hadoop-default.xml : Read-only defaults for hadoop.
    // 2. hadoop-site.xml: Site-specific configuration for a given hadoop
    // installation.
    // Now add the settings from "properties" object to override any
    // existing properties
    // All of the above is accomplished in the method call below

    JobConf jc = null;
    if (!this.pigContext.getExecType().isLocal()) {
        // Check existence of user provided configs
        String isHadoopConfigsOverriden = properties.getProperty("pig.use.overriden.hadoop.configs");
        if (isHadoopConfigsOverriden != null && isHadoopConfigsOverriden.equals("true")) {
            jc = new JobConf(ConfigurationUtil.toConfiguration(properties));
        } else {
            // Check existence of hadoop-site.xml or core-site.xml in
            // classpath
            // if user provided confs are not being used
            Configuration testConf = new Configuration();
            ClassLoader cl = testConf.getClassLoader();
            URL hadoop_site = cl.getResource(HADOOP_SITE);
            URL core_site = cl.getResource(CORE_SITE);

            if (hadoop_site == null && core_site == null) {
                throw new ExecException(
                        "Cannot find hadoop configurations in classpath (neither hadoop-site.xml nor core-site.xml was found in the classpath)."
                                + " If you plan to use local mode, please put -x local option in command line",
                        4010);
            }
            jc = new JobConf();
        }
        jc.addResource("pig-cluster-hadoop-site.xml");
        jc.addResource(YARN_SITE);

        // Trick to invoke static initializer of DistributedFileSystem to
        // add hdfs-default.xml
        // into configuration
        new DistributedFileSystem();

        // the method below alters the properties object by overriding the
        // hadoop properties with the values from properties and recomputing
        // the properties
        recomputeProperties(jc, properties);
    } else {
        // If we are running in local mode we don't read the hadoop conf file
        if (properties.getProperty("mapreduce.framework.name") == null) {
            properties.setProperty("mapreduce.framework.name", "local");
        }
        properties.setProperty(JOB_TRACKER_LOCATION, LOCAL);
        properties.setProperty(FILE_SYSTEM_LOCATION, "file:///");
        properties.setProperty(ALTERNATIVE_FILE_SYSTEM_LOCATION, "file:///");

        jc = new JobConf(false);
        jc.addResource("core-default.xml");
        jc.addResource("mapred-default.xml");
        jc.addResource("yarn-default.xml");
        recomputeProperties(jc, properties);
    }

    cluster = jc.get(JOB_TRACKER_LOCATION);
    nameNode = jc.get(FILE_SYSTEM_LOCATION);
    if (nameNode == null)
        nameNode = (String) pigContext.getProperties().get(ALTERNATIVE_FILE_SYSTEM_LOCATION);

    if (cluster != null && cluster.length() > 0) {
        if (!cluster.contains(":") && !cluster.equalsIgnoreCase(LOCAL)) {
            cluster = cluster + ":50020";
        }
        properties.setProperty(JOB_TRACKER_LOCATION, cluster);
    }

    if (nameNode != null && nameNode.length() > 0) {
        if (!nameNode.contains(":") && !nameNode.equalsIgnoreCase(LOCAL)) {
            nameNode = nameNode + ":8020";
        }
        properties.setProperty(FILE_SYSTEM_LOCATION, nameNode);
    }

    log.info("Connecting to hadoop file system at: " + (nameNode == null ? LOCAL : nameNode));
    // constructor sets DEFAULT_REPLICATION_FACTOR_KEY
    ds = new HDataStorage(properties);

    if (cluster != null && !cluster.equalsIgnoreCase(LOCAL)) {
        log.info("Connecting to map-reduce job tracker at: " + jc.get(JOB_TRACKER_LOCATION));
    }

    // Set job-specific configuration knobs
    jobConf = jc;
}

From source file:org.elasticsearch.hadoop.mr.HadoopIOUtils.java

License:Apache License

public static InputStream open(String resource, Configuration conf) {
    ClassLoader loader = conf.getClassLoader();

    if (loader == null) {
        loader = Thread.currentThread().getContextClassLoader();
    }

    if (loader == null) {
        loader = HadoopIOUtils.class.getClassLoader();
    }

    boolean trace = log.isTraceEnabled();

    try {
        // no prefix means classpath
        if (!resource.contains(":")) {

            InputStream result = loader.getResourceAsStream(resource);
            if (result != null) {
                if (trace) {
                    log.trace(String.format("Loaded resource %s from classpath", resource));
                }
                return result;
            }
            // fall back to the distributed cache
            URI[] uris = DistributedCache.getCacheFiles(conf);
            if (uris != null) {
                for (URI uri : uris) {
                    if (uri.toString().contains(resource)) {
                        if (trace) {
                            log.trace(String.format("Loaded resource %s from distributed cache", resource));
                        }
                        return uri.toURL().openStream();
                    }
                }
            }
        }

        // fall back to file system
        Path p = new Path(resource);
        FileSystem fs = p.getFileSystem(conf);
        return fs.open(p);
    } catch (IOException ex) {
        throw new EsHadoopIllegalArgumentException(
                String.format("Cannot open stream for resource %s", resource));
    }
}
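
A short hedged sketch of calling this helper; the resource name is a made-up example, and the enclosing method is assumed to declare throws IOException for the stream's close().

// Names without a scheme prefix are resolved against the classpath first,
// then the distributed cache, and finally the file system (see open() above).
try (InputStream in = HadoopIOUtils.open("query-template.json", conf)) {
    // consume the stream, e.g. parse the template
}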