Example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytab

Introduction

This page collects usage examples for org.apache.hadoop.security.UserGroupInformation#loginUserFromKeytab, drawn from the open-source projects listed below.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static void loginUserFromKeytab(String user, String path) throws IOException 

Document

Log a user in from a keytab file.
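
A minimal, self-contained sketch of the typical call sequence follows. It assumes a kerberized cluster; the principal hdfs/host.example.com@EXAMPLE.COM and the keytab path /etc/security/keytabs/hdfs.keytab are placeholders rather than values taken from any of the projects below. hadoop.security.authentication must be set to kerberos in the Configuration passed to UserGroupInformation.setConfiguration before the login call, otherwise the login is treated as simple authentication.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginExample {
    public static void main(String[] args) throws IOException {
        // Placeholder principal and keytab path; substitute the values for your cluster.
        String principal = "hdfs/host.example.com@EXAMPLE.COM";
        String keytab = "/etc/security/keytabs/hdfs.keytab";

        // Kerberos must be enabled in the Configuration before the login call.
        Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Logs in from the keytab and replaces the process-wide login user.
        UserGroupInformation.loginUserFromKeytab(principal, keytab);

        // Subsequent Hadoop calls (for example HDFS access) run as the logged-in user.
        FileSystem fs = FileSystem.get(conf);
        System.out.println("Logged in as " + UserGroupInformation.getLoginUser()
                + ", root exists: " + fs.exists(new Path("/")));
    }
}

Note that loginUserFromKeytab changes the static, process-wide login user; when a per-call identity is needed instead, the related loginUserFromKeytabAndReturnUGI returns a UserGroupInformation without replacing the login user.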

Usage

From source file: org.apache.sentry.tests.e2e.hdfs.TestDbHdfsBase.java

License: Apache License

protected static void kinitFromKeytabFile(String user, String keyTabFile) throws IOException {
    Configuration conf = new Configuration();
    conf.set("hadoop.security.authentication", authenticationType);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(user, keyTabFile);
}

From source file: org.apache.sentry.tests.e2e.hive.fs.TestFSBase.java

License: Apache License

protected static void kinitFromKeytabFile(String user, String keyTabFile) throws IOException {
    Configuration conf = new Configuration();
    conf.set(TestFSContants.SENTRY_E2E_TEST_SECURITY_AUTH, authenticationType);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.loginUserFromKeytab(user, keyTabFile);
}

From source file: org.apache.sentry.tests.e2e.hive.hiveserver.UnmanagedHiveServer.java

License: Apache License

public void kinit(String user) throws Exception {
    UserGroupInformation.loginUserFromKeytab(user, KEYTAB_LOCATION + "/" + user + ".keytab");
    LOGGER.info("Kinited user: " + user + " keytab: " + KEYTAB_LOCATION + "/" + user + ".keytab");
}

From source file: org.apache.slider.server.appmaster.SliderAppMaster.java

License: Apache License

protected void login(String principal, File localKeytabFile) throws IOException, SliderException {
    UserGroupInformation.loginUserFromKeytab(principal, localKeytabFile.getAbsolutePath());
    validateLoginUser(UserGroupInformation.getLoginUser());
}

From source file: org.apache.solr.core.HdfsDirectoryFactory.java

License: Apache License

private void initKerberos() {
    String keytabFile = params.get(KERBEROS_KEYTAB, "").trim();
    if (keytabFile.length() == 0) {
        throw new IllegalArgumentException(
                KERBEROS_KEYTAB + " required because " + KERBEROS_ENABLED + " set to true");
    }
    String principal = params.get(KERBEROS_PRINCIPAL, "");
    if (principal.length() == 0) {
        throw new IllegalArgumentException(
                KERBEROS_PRINCIPAL + " required because " + KERBEROS_ENABLED + " set to true");
    }
    synchronized (HdfsDirectoryFactory.class) {
        if (kerberosInit == null) {
            kerberosInit = new Boolean(true);
            Configuration conf = new Configuration();
            conf.set("hadoop.security.authentication", "kerberos");
            UserGroupInformation.setConfiguration(conf);
            LOG.info("Attempting to acquire kerberos ticket with keytab: {}, principal: {} ", keytabFile,
                    principal);
            try {
                UserGroupInformation.loginUserFromKeytab(principal, keytabFile);
            } catch (IOException ioe) {
                throw new RuntimeException(ioe);
            }
            LOG.info("Got Kerberos ticket");
        }
    }
}

From source file: org.apache.sqoop.security.Authentication.KerberosAuthenticationHandler.java

License: Apache License

public void secureLogin() {
    MapContext mapContext = SqoopConfiguration.getInstance().getContext();
    String keytab = mapContext.getString(SecurityConstants.AUTHENTICATION_KERBEROS_KEYTAB).trim();
    if (keytab.length() == 0) {
        throw new SqoopException(SecurityError.AUTH_0001, SecurityConstants.AUTHENTICATION_KERBEROS_KEYTAB);
    }
    keytabFile = keytab;

    String principal = mapContext.getString(SecurityConstants.AUTHENTICATION_KERBEROS_PRINCIPAL).trim();
    if (principal.length() == 0) {
        throw new SqoopException(SecurityError.AUTH_0002, SecurityConstants.AUTHENTICATION_KERBEROS_PRINCIPAL);
    }
    keytabPrincipal = principal;

    Configuration conf = new Configuration();
    conf.set(get_hadoop_security_authentication(), SecurityConstants.TYPE.KERBEROS.name());
    UserGroupInformation.setConfiguration(conf);
    try {
        String hostPrincipal = SecurityUtil.getServerPrincipal(principal, "0.0.0.0");
        UserGroupInformation.loginUserFromKeytab(hostPrincipal, keytab);
    } catch (IOException ex) {
        throw new SqoopException(SecurityError.AUTH_0003, ex);
    }
    LOG.info("Using Kerberos authentication, principal [" + principal + "] keytab [" + keytab + "]");
}

From source file: org.apache.storm.hdfs.blobstore.HdfsBlobStore.java

License: Apache License

/**
 * Allow a Hadoop Configuration to be passed for testing. If it's null then the hadoop configs
 * must be in your classpath.
 */
protected void prepareInternal(Map conf, String overrideBase, Configuration hadoopConf) {
    this.conf = conf;
    if (overrideBase == null) {
        overrideBase = (String) conf.get(Config.BLOBSTORE_DIR);
    }
    if (overrideBase == null) {
        throw new RuntimeException("You must specify a blobstore directory for HDFS to use!");
    }
    LOG.debug("directory is: {}", overrideBase);
    try {
        // if a HDFS keytab/principal have been supplied login, otherwise assume they are
        // logged in already or running insecure HDFS.
        String principal = (String) conf.get(Config.BLOBSTORE_HDFS_PRINCIPAL);
        String keyTab = (String) conf.get(Config.BLOBSTORE_HDFS_KEYTAB);

        if (principal != null && keyTab != null) {
            UserGroupInformation.loginUserFromKeytab(principal, keyTab);
        } else {
            if (principal == null && keyTab != null) {
                throw new RuntimeException("You must specify an HDFS principal to go with the keytab!");

            } else {
                if (principal != null && keyTab == null) {
                    throw new RuntimeException("You must specify HDFS keytab go with the principal!");
                }
            }
        }
    } catch (IOException e) {
        throw new RuntimeException("Error logging in from keytab!", e);
    }
    Path baseDir = new Path(overrideBase, BASE_BLOBS_DIR_NAME);
    try {
        if (hadoopConf != null) {
            _hbs = new HdfsBlobStoreImpl(baseDir, conf, hadoopConf);
        } else {
            _hbs = new HdfsBlobStoreImpl(baseDir, conf);
        }
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
    _localSubject = getHadoopUser();
    _aclHandler = new BlobStoreAclHandler(conf);
}

From source file: org.apache.storm.hive.common.HiveUtils.java

License: Apache License

public static synchronized UserGroupInformation authenticate(String keytab, String principal)
        throws AuthenticationFailed {
    File kfile = new File(keytab);
    if (!(kfile.isFile() && kfile.canRead())) {
        throw new IllegalArgumentException("The keyTab file: " + keytab + " is nonexistent or can't read. "
                + "Please specify a readable keytab file for Kerberos auth.");
    }
    try {
        principal = SecurityUtil.getServerPrincipal(principal, "");
    } catch (Exception e) {
        throw new AuthenticationFailed("Host lookup error when resolving principal " + principal, e);
    }
    try {
        UserGroupInformation.loginUserFromKeytab(principal, keytab);
        return UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new AuthenticationFailed("Login failed for principal " + principal, e);
    }
}

From source file: org.apache.zeppelin.impala.security.JDBCSecurityImpl.java

License: Apache License

/***
 * @param properties
 */
public static void createSecureConfiguration(Properties properties) {
    AuthenticationMethod authType = getAuthtype(properties);

    switch (authType) {
    case KERBEROS:
        Configuration conf = new org.apache.hadoop.conf.Configuration();
        conf.set("hadoop.security.authentication", KERBEROS.toString());
        UserGroupInformation.setConfiguration(conf);
        try {
            UserGroupInformation.loginUserFromKeytab(properties.getProperty("zeppelin.jdbc.principal"),
                    properties.getProperty("zeppelin.jdbc.keytab.location"));
        } catch (IOException e) {
            LOGGER.error("Failed to get either keytab location or principal name in the " + "interpreter", e);
        }
    }
}

From source file: org.apache.zeppelin.interpreter.SnappyDataZeppelinInterpreter.java

License: Apache License

@Override
public void open() {
    // set properties and do login before creating any spark stuff for secured cluster
    if (getProperty("master").equals("yarn-client")) {
        System.setProperty("SPARK_YARN_MODE", "true");
    }
    if (getProperty().containsKey("spark.yarn.keytab") && getProperty().containsKey("spark.yarn.principal")) {
        try {
            String keytab = getProperty().getProperty("spark.yarn.keytab");
            String principal = getProperty().getProperty("spark.yarn.principal");
            UserGroupInformation.loginUserFromKeytab(principal, keytab);
        } catch (IOException e) {
            throw new RuntimeException("Can not pass kerberos authentication", e);
        }
    }

    conf = new SparkConf();
    URL[] urls = getClassloaderUrls();

    // Very nice discussion about how scala compiler handle classpath
    // https://groups.google.com/forum/#!topic/scala-user/MlVwo2xCCI0

    /*
     * > val env = new nsc.Settings(errLogger) > env.usejavacp.value = true > val p = new
     * Interpreter(env) > p.setContextClassLoader > Alternatively you can set the class path through
     * nsc.Settings.classpath.
     *
     * >> val settings = new Settings() >> settings.usejavacp.value = true >>
     * settings.classpath.value += File.pathSeparator + >> System.getProperty("java.class.path") >>
     * val in = new Interpreter(settings) { >> override protected def parentClassLoader =
     * getClass.getClassLoader >> } >> in.setContextClassLoader()
     */
    Settings settings = new Settings();

    // process args
    String args = getProperty("args");
    if (args == null) {
        args = "";
    }

    String[] argsArray = args.split(" ");
    LinkedList<String> argList = new LinkedList<String>();
    for (String arg : argsArray) {
        argList.add(arg);
    }

    String sparkReplClassDir = getProperty("spark.repl.classdir");
    if (sparkReplClassDir == null) {
        sparkReplClassDir = System.getProperty("spark.repl.classdir");
    }
    if (sparkReplClassDir == null) {
        sparkReplClassDir = System.getProperty("java.io.tmpdir");
    }

    outputDir = createTempDir(sparkReplClassDir);

    argList.add("-Yrepl-class-based");
    argList.add("-Yrepl-outdir");
    argList.add(outputDir.getAbsolutePath());

    scala.collection.immutable.List<String> list = JavaConversions.asScalaBuffer(argList).toList();

    settings.processArguments(list, true);

    // set classpath for scala compiler
    PathSetting pathSettings = settings.classpath();
    String classpath = "";
    List<File> paths = currentClassPath();
    for (File f : paths) {
        if (classpath.length() > 0) {
            classpath += File.pathSeparator;
        }
        classpath += f.getAbsolutePath();
    }

    if (urls != null) {
        for (URL u : urls) {
            if (classpath.length() > 0) {
                classpath += File.pathSeparator;
            }
            classpath += u.getFile();
        }
    }

    // add dependency from local repo
    String localRepo = getProperty("zeppelin.interpreter.localRepo");
    if (localRepo != null) {
        File localRepoDir = new File(localRepo);
        if (localRepoDir.exists()) {
            File[] files = localRepoDir.listFiles();
            if (files != null) {
                for (File f : files) {
                    if (classpath.length() > 0) {
                        classpath += File.pathSeparator;
                    }
                    classpath += f.getAbsolutePath();
                }
            }
        }
    }

    pathSettings.v_$eq(classpath);
    settings.scala$tools$nsc$settings$ScalaSettings$_setter_$classpath_$eq(pathSettings);

    // set classloader for scala compiler
    settings.explicitParentLoader_$eq(new Some<ClassLoader>(Thread.currentThread().getContextClassLoader()));
    BooleanSetting b = (BooleanSetting) settings.usejavacp();
    b.v_$eq(true);
    settings.scala$tools$nsc$settings$StandardScalaSettings$_setter_$usejavacp_$eq(b);

    /* Required for scoped mode.
     * In scoped mode multiple scala compiler (repl) generates class in the same directory.
     * Class names is not randomly generated and look like '$line12.$read$$iw$$iw'
     * Therefore it's possible to generated class conflict(overwrite) with other repl generated
     * class.
     *
     * To prevent generated class name conflict,
     * change prefix of generated class name from each scala compiler (repl) instance.
     *
     * In Spark 2.x, REPL generated wrapper class name should compatible with the pattern
     * ^(\$line(?:\d+)\.\$read)(?:\$\$iw)+$
     */
    System.setProperty("scala.repl.name.line", "$line" + this.hashCode());

    // To prevent 'File name too long' error on some file system.
    MutableSettings.IntSetting numClassFileSetting = settings.maxClassfileName();
    numClassFileSetting.v_$eq(128);
    settings.scala$tools$nsc$settings$ScalaSettings$_setter_$maxClassfileName_$eq(numClassFileSetting);

    synchronized (sharedInterpreterLock) {
        /* create scala repl */

        this.interpreter = new SparkILoop((java.io.BufferedReader) null, new PrintWriter(out));

        interpreter.settings_$eq(settings);

        interpreter.createInterpreter();

        intp = ZeppelinIntpUtil.invokeMethod(interpreter, "intp");
        ZeppelinIntpUtil.invokeMethod(intp, "setContextClassLoader");
        ZeppelinIntpUtil.invokeMethod(intp, "initializeSynchronous");

        if (ZeppelinIntpUtil.findClass("org.apache.spark.repl.SparkJLineCompletion", true) != null) {
            completer = ZeppelinIntpUtil.instantiateClass("org.apache.spark.repl.SparkJLineCompletion",
                    new Class[] { ZeppelinIntpUtil.findClass("org.apache.spark.repl.SparkIMain") },
                    new Object[] { intp });
        } else if (ZeppelinIntpUtil.findClass("scala.tools.nsc.interpreter.PresentationCompilerCompleter",
                true) != null) {
            completer = ZeppelinIntpUtil.instantiateClass(
                    "scala.tools.nsc.interpreter.PresentationCompilerCompleter", new Class[] { IMain.class },
                    new Object[] { intp });
        } else if (ZeppelinIntpUtil.findClass("scala.tools.nsc.interpreter.JLineCompletion", true) != null) {
            completer = ZeppelinIntpUtil.instantiateClass("scala.tools.nsc.interpreter.JLineCompletion",
                    new Class[] { IMain.class }, new Object[] { intp });
        }
        sparkSession = getSparkSession();
        sc = getSparkContext();
        if (sc.getPoolForName("fair").isEmpty()) {
            Value schedulingMode = org.apache.spark.scheduler.SchedulingMode.FAIR();
            int minimumShare = 0;
            int weight = 1;
            Pool pool = new Pool("fair", schedulingMode, minimumShare, weight);
            sc.taskScheduler().rootPool().addSchedulable(pool);
        }

        if (null != getProperty(Constants.FS_S3A_ACCESS_KEY)
                && null != getProperty(Constants.FS_S3A_SECRET_KEY)) {
            sc.hadoopConfiguration().set(Constants.FS_S3A_IMPL, "org.apache.hadoop.fs.s3a.S3AFileSystem");
            sc.hadoopConfiguration().set(Constants.FS_S3A_ACCESS_KEY, getProperty(Constants.FS_S3A_ACCESS_KEY));
            sc.hadoopConfiguration().set(Constants.FS_S3A_SECRET_KEY, getProperty(Constants.FS_S3A_SECRET_KEY));
        }
        sparkVersion = SparkVersion.fromVersionString(sc.version());

        snc = getSnappyContext();

        dep = getDependencyResolver();

        z = new ZeppelinContext(sc, snc, null, dep, Integer.parseInt(getProperty("zeppelin.spark.maxResult")));

        interpret("@transient val _binder = new java.util.HashMap[String, Object]()");
        Map<String, Object> binder;

        binder = (Map<String, Object>) getLastObject();
        binder.put("sc", sc);
        binder.put("snc", snc);
        binder.put("z", z);

        binder.put("spark", sparkSession);

        interpret("@transient val z = "
                + "_binder.get(\"z\").asInstanceOf[org.apache.zeppelin.spark.ZeppelinContext]");
        interpret("@transient val sc = " + "_binder.get(\"sc\").asInstanceOf[org.apache.spark.SparkContext]");
        // Injecting SnappyContext in repl
        interpret("@transient val snc = "
                + "_binder.get(\"snc\").asInstanceOf[org.apache.spark.sql.SnappyContext]");
        interpret("@transient val snappyContext = "
                + "_binder.get(\"snc\").asInstanceOf[org.apache.spark.sql.SnappyContext]");

        interpret("@transient val spark = "
                + "_binder.get(\"spark\").asInstanceOf[org.apache.spark.sql.SparkSession]");

        interpret("import org.apache.spark.SparkContext._");
        interpret("import org.apache.spark.sql.SnappyContext._");
        interpret("import org.apache.spark.sql.{Row, SaveMode, SnappyContext}");
        if (importImplicit()) {
            interpret("import spark.implicits._");
            interpret("import spark.sql");
            interpret("import org.apache.spark.sql.functions._");

        }
    }

    /* Temporary disabling DisplayUtils. see https://issues.apache.org/jira/browse/ZEPPELIN-127
     *
    // Utility functions for display
    intp.interpret("import org.apache.zeppelin.spark.utils.DisplayUtils._");
            
    // Scala implicit value for spark.maxResult
    intp.interpret("import org.apache.zeppelin.spark.utils.SparkMaxResult");
    intp.interpret("implicit val sparkMaxResult = new SparkMaxResult(" +
    Integer.parseInt(getProperty("zeppelin.spark.maxResult")) + ")");
     */

    // add jar from local repo
    if (localRepo != null) {
        File localRepoDir = new File(localRepo);
        if (localRepoDir.exists()) {
            File[] files = localRepoDir.listFiles();
            if (files != null) {
                for (File f : files) {
                    if (f.getName().toLowerCase().endsWith(".jar")) {
                        sc.addJar(f.getAbsolutePath());
                        logger.info("sc.addJar(" + f.getAbsolutePath() + ")");
                    } else {
                        sc.addFile(f.getAbsolutePath());
                        logger.info("sc.addFile(" + f.getAbsolutePath() + ")");
                    }
                }
            }
        }
    }

}