Example usage for org.apache.hadoop.util VersionInfo getVersion

List of usage examples for org.apache.hadoop.util VersionInfo getVersion

Introduction

On this page you can find example usages of org.apache.hadoop.util.VersionInfo.getVersion().

Prototype

public static String getVersion() 

Document

Get the Hadoop version.
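
A minimal sketch of calling it directly (the class name PrintHadoopVersion below is illustrative and not taken from any of the sources listed here):

import org.apache.hadoop.util.VersionInfo;

public class PrintHadoopVersion {
    public static void main(String[] args) {
        // Prints the version of the Hadoop JARs on the classpath, e.g. "2.7.3".
        System.out.println("Hadoop version: " + VersionInfo.getVersion());
    }
}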

Usage

From source file: JaqlShell.java

License: Apache License

/**
 * Initializes a mini DFS cluster and a mini MapReduce cluster for testing.
 *
 * @param dir directory for the test build data
 * @param numNodes number of cluster nodes
 * @throws Exception if the Hadoop version is unsupported or cluster startup fails
 */
public void init(String dir, int numNodes) throws Exception {
    String vInfo = VersionInfo.getVersion();
    System.setProperty("test.build.data", dir);
    m_conf = new Configuration();

    // setup conf according to the Hadoop version
    if (vInfo.indexOf("0.20") < 0) {
        throw new Exception("Unsupported Hadoop version: " + vInfo);
    }

    // setup the mini dfs cluster
    m_fs = new MiniDFSCluster(m_conf, numNodes, true, (String[]) null);
    FileSystem filesystem = m_fs.getFileSystem();
    m_conf.set("fs.default.name", filesystem.getUri().toString());
    Path parentdir = filesystem.getHomeDirectory();
    filesystem.mkdirs(parentdir);
    //FSUtils.setVersion(filesystem, parentdir);

    // setup hbase cluster (only if OS is not windows)
    //    if(!System.getProperty("os.name").toLowerCase().contains("win")) {
    //      m_conf.set(HConstants.HBASE_DIR, parentdir.toString());      
    //      Path hdfsTestDir = filesystem.makeQualified(new Path(m_conf.get(HConstants.HBASE_DIR)));
    //
    //      // prime the hdfs for hbase information...
    //      HRegion root = HRegion.createHRegion(HRegionInfo.ROOT_REGIONINFO, hdfsTestDir, (HBaseConfiguration)m_conf);
    //      HRegion meta = HRegion.createHRegion(HRegionInfo.FIRST_META_REGIONINFO, hdfsTestDir, (HBaseConfiguration)m_conf);
    //      HRegion.addRegionToMETA(root, meta);
    //
    //      // ... and close the root and meta
    //      if (meta != null) {
    //        meta.close();
    //        meta.getLog().closeAndDelete();
    //      }
    //      if (root != null) {
    //        root.close();
    //        root.getLog().closeAndDelete();
    //      }
    //
    //      try
    //      {
    //        this.zooKeeperCluster = new MiniZooKeeperCluster();
    //        File testDir = new File(dir);
    //        int clientPort = this.zooKeeperCluster.startup(testDir);
    //        m_conf.set("hbase.zookeeper.property.clientPort", Integer.toString(clientPort));
    //      } catch(Exception e) {
    //        LOG.error("Unable to startup zookeeper");
    //        throw new IOException(e);
    //      }
    //      try {
    //        // start the mini cluster
    //        m_base = new MiniHBaseCluster((HBaseConfiguration)m_conf, numNodes);
    //      } catch(Exception e) {
    //        LOG.error("Unable to startup hbase");
    //        throw new IOException(e);
    //      }
    //      try {
    //        // opening the META table ensures that cluster is running
    //        new HTable((HBaseConfiguration)m_conf, HConstants.META_TABLE_NAME);        
    //
    //        //setupOverride(conf);
    //      }
    //      catch (Exception e)
    //      {
    //        LOG.warn("Could not verify that hbase is up", e);
    //      }
    //      setupOverride();
    //    }

    m_mr = startMRCluster(numNodes, m_fs.getFileSystem().getName(), m_conf);

    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // make the home directory if it does not exist
    Path hd = fs.getWorkingDirectory();
    if (!fs.exists(hd))
        fs.mkdirs(hd);

    // make the $USER/_temporary directory if it does not exist
    Path tmpPath = new Path(hd, "_temporary");
    if (!fs.exists(tmpPath))
        fs.mkdirs(tmpPath);

    //    if (m_base != null)
    //    {
    //      try {
    //        m_admin = new HBaseAdmin((HBaseConfiguration) m_conf);
    //        HTableDescriptor[] tables = m_admin.listTables();
    //        if (tables != null)
    //        {
    //          for (int i = 0; i < tables.length; i++)
    //          {
    //            m_admin.enableTable(tables[i].getName());
    //          }
    //        }
    //      } catch(Exception e) {
    //        LOG.warn("failed to enable hbase tables");
    //      }
    //    }
}

From source file: co.cask.cdap.common.lang.FilterClassLoaderTest.java

License: Apache License

@Test
public void testHadoopResourcesVisible() throws ClassNotFoundException {
    FilterClassLoader classLoader = FilterClassLoader.create(this.getClass().getClassLoader());
    ClassLoader oldClassLoader = ClassLoaders.setContextClassLoader(classLoader);
    try {
        // VersionInfo uses the context class loader to find the "common-version-info.properties" file.
        // If that file is missing or cannot be located, getVersion() returns "Unknown".
        Assert.assertNotEquals("Unknown", VersionInfo.getVersion());
    } finally {
        ClassLoaders.setContextClassLoader(oldClassLoader);
    }

    // Loading a standard Hadoop class should succeed, and the loaded class should be the same
    // as the system Configuration class.
    Assert.assertSame(Configuration.class, classLoader.loadClass(Configuration.class.getName()));
}

From source file: co.cask.cdap.data.runtime.main.ClientVersions.java

License: Apache License

public static String getHadoopVersion() {
    return VersionInfo.getVersion();
}

From source file: co.cask.cdap.explore.service.ExploreServiceUtils.java

License: Apache License

/**
 * Check that Hive is on the classpath, with a supported version.
 */
public static HiveSupport checkHiveSupport(@Nullable ClassLoader hiveClassLoader) {
    // First try to figure which hive support is relevant based on Hadoop distribution name
    String hadoopVersion = VersionInfo.getVersion();
    for (HiveSupport hiveSupport : HiveSupport.values()) {
        if (hiveSupport.getHadoopVersionPattern() != null
                && hiveSupport.getHadoopVersionPattern().matcher(hadoopVersion).matches()) {
            return hiveSupport;
        }
    }

    String hiveVersion = getHiveVersion(hiveClassLoader);
    LOG.info("Client Hive version: {}", hiveVersion);
    if (hiveVersion.startsWith("0.12.")) {
        return HiveSupport.HIVE_12;
    } else if (hiveVersion.startsWith("0.13.")) {
        return HiveSupport.HIVE_13;
    } else if (hiveVersion.startsWith("0.14.") || hiveVersion.startsWith("1.0.")) {
        return HiveSupport.HIVE_14;
    } else if (hiveVersion.startsWith("1.1.")) {
        return HiveSupport.HIVE_1_1;
    } else if (hiveVersion.startsWith("1.2")) {
        return HiveSupport.HIVE_1_2;
    }

    throw new RuntimeException("Hive distribution not supported. Set the configuration '"
            + Constants.Explore.EXPLORE_ENABLED + "' to false to start up without Explore.");
}

From source file: co.cask.cdap.operations.hdfs.HDFSInfo.java

License: Apache License

@Override
public String getVersion() {
    return VersionInfo.getVersion();
}

From source file: co.cask.hydrator.plugin.batch.source.HiveBatchSource.java

License: Apache License

@Override
public void prepareRun(BatchSourceContext context) throws Exception {
    // This line loads the VersionInfo class here to make it available in the HCatInputFormat.setInput call. This is
    // needed to support CDAP 3.2, where only the classes of the plugin jar were exposed and not the resources.
    LOG.trace("Hadoop version: {}", VersionInfo.getVersion());
    Job job = JobUtils.createInstance();
    Configuration conf = job.getConfiguration();

    conf.set(HiveConf.ConfVars.METASTOREURIS.varname, config.metaStoreURI);

    if (UserGroupInformation.isSecurityEnabled()) {
        conf.set(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL.varname, "true");
        conf.set("hive.metastore.token.signature", HiveAuthFactory.HS2_CLIENT_TOKEN);
    }
    // Switch to this class's classloader so that the setInput call below can access the VersionInfo class
    // loaded above. This is needed to support CDAP 3.2, where only the classes of plugin jars were exposed
    // and not the resources.
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    try {
        Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
        HCatInputFormat.setInput(conf, config.dbName, config.tableName, config.partitions);
    } finally {
        Thread.currentThread().setContextClassLoader(classLoader);
    }

    HCatSchema hCatSchema = HCatInputFormat.getTableSchema(conf);
    if (config.schema != null) {
        // If the user provided a schema, use it to read the table. This allows the user to
        // drop non-primitive types and still read the table.
        hCatSchema = HiveSchemaConverter.toHiveSchema(Schema.parseJson(config.schema), hCatSchema);
        HCatInputFormat.setOutputSchema(job, hCatSchema);
    }
    HiveSchemaStore.storeHiveSchema(context, config.dbName, config.tableName, hCatSchema);
    context.setInput(
            Input.of(config.referenceName, new SourceInputFormatProvider(HCatInputFormat.class, conf)));
}

From source file: com.cloudera.lib.service.hadoop.HadoopService.java

License: Open Source License

@Override
protected void init() throws ServiceException {
    LOG.info("Using Hadoop JARs version [{}]", VersionInfo.getVersion());
    String security = getServiceConfig().get(AUTHENTICATION_TYPE, "simple").trim();
    if (security.equals("kerberos")) {
        String defaultName = getServer().getName();
        String keytab = System.getProperty("user.home") + "/" + defaultName + ".keytab";
        keytab = getServiceConfig().get(KERBEROS_KEYTAB, keytab).trim();
        if (keytab.length() == 0) {
            throw new ServiceException(HadoopException.ERROR.H01, KERBEROS_KEYTAB);
        }
        String principal = defaultName + "/localhost@LOCALHOST";
        principal = getServiceConfig().get(KERBEROS_PRINCIPAL, principal).trim();
        if (principal.length() == 0) {
            throw new ServiceException(HadoopException.ERROR.H01, KERBEROS_PRINCIPAL);
        }
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);
        try {
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
        } catch (IOException ex) {
            throw new ServiceException(HadoopException.ERROR.H02, ex.getMessage(), ex);
        }
        LOG.info("Using Hadoop Kerberos authentication, principal [{}] keytab [{}]", principal, keytab);
    } else if (security.equals("simple")) {
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "simple");
        UserGroupInformation.setConfiguration(conf);
        LOG.info("Using Hadoop simple/pseudo authentication, principal [{}]", System.getProperty("user.name"));
    } else {
        throw new ServiceException(HadoopException.ERROR.H09, security);
    }

    serviceHadoopConf = new XConfiguration();
    for (Map.Entry entry : getServiceConfig()) {
        String name = (String) entry.getKey();
        if (name.startsWith(HADOOP_CONF_PREFIX)) {
            name = name.substring(HADOOP_CONF_PREFIX.length());
            String value = (String) entry.getValue();
            serviceHadoopConf.set(name, value);

        }
    }
    setRequiredServiceHadoopConf(serviceHadoopConf);

    LOG.debug("Hadoop default configuration:");
    for (Map.Entry entry : serviceHadoopConf) {
        LOG.debug("  {} = {}", entry.getKey(), entry.getValue());
    }

    jobTrackerWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(JOB_TRACKER_WHITELIST));
    nameNodeWhitelist = toLowerCase(getServiceConfig().getTrimmedStringCollection(NAME_NODE_WHITELIST));
}

From source file: com.cloudera.sqoop.shims.ShimLoader.java

License: Apache License

@SuppressWarnings("unchecked")
/**
 * Actually load the shim for the current Hadoop version.
 * @param matchExprs a list of regexes against which the current Hadoop
 * version is compared. The first one to hit defines which class/jar to
 * use.
 * @param classNames a list in the same order as matchExprs. This defines
 * what class name to load as the shim class if the Hadoop version matches
 * matchExprs[i].
 * @param jarPatterns a list in the same order as matchExprs. This defines
 * a pattern to select a jar file from which the shim classes should be
 * loaded.
 * @param xface the shim interface that the shim class must match.
 * @param conf an optional Configuration whose context classloader should
 * be updated to the current Thread's contextClassLoader after pushing a
 * new ClassLoader on the stack to load this shim jar.
 */
private static <T> T loadShim(List<String> matchExprs, List<String> classNames, List<String> jarPatterns,
        Class<T> xface, Configuration conf) {
    String version = VersionInfo.getVersion();

    LOG.debug("Loading shims for class : " + xface.getName());
    LOG.debug("Hadoop version: " + version);

    for (int i = 0; i < matchExprs.size(); i++) {
        LOG.debug("Checking: " + matchExprs.get(i));
        if (version.matches(matchExprs.get(i))) {
            String className = classNames.get(i);
            String jarPattern = jarPatterns.get(i);

            if (LOG.isDebugEnabled()) {
                LOG.debug("Version matched regular expression: " + matchExprs.get(i));
                LOG.debug("Trying to load class: " + className);
            }

            // Test to see if the class is already on the classpath.
            try {
                // If we can load the shim directly, we just do so. In this case,
                // there's no need to update the Configuration's classloader,
                // because we didn't modify the classloader stack.
                return getShimInstance(className, xface);
            } catch (Exception e) {
                // Not already present. We'll need to load a jar for this.
                // Ignore this exception.
            }

            try {
                LOG.debug("Searching for jar matching: " + jarPattern);
                loadMatchingShimJar(jarPattern, className);
                LOG.debug("Loading shim from jar");
                T shim = getShimInstance(className, xface);

                if (null != conf) {
                    // Set the context classloader for the base Configuration to
                    // the current one, so we can load more classes from the shim jar.
                    conf.setClassLoader(Thread.currentThread().getContextClassLoader());
                }

                return shim;
            } catch (Exception e) {
                throw new RuntimeException("Could not load shim in class " + className, e);
            }
        }
    }

    throw new RuntimeException("Could not find appropriate Hadoop shim for " + version);
}

From source file: com.google.cloud.hadoop.util.HadoopVersionInfo.java

License: Open Source License

/**
 * Construct a HadoopVersionInfo using the version string returned by
 * {@link VersionInfo#getVersion()}.
 */
public HadoopVersionInfo() {
    this(VersionInfo.getVersion());
}

From source file: com.marklogic.contentpump.ContentPump.java

License: Apache License

public static void logVersions() {
    System.out.println("ContentPump version: " + Versions.getVersion());
    System.out.println("Java version: " + System.getProperty("java.version"));
    System.out.println("Hadoop version: " + VersionInfo.getVersion());
    System.out.println("Supported MarkLogic versions: " + Versions.getMinServerVersion() + " - "
            + Versions.getMaxServerVersion());
}