Example usage for org.apache.hadoop.security UserGroupInformation hasKerberosCredentials

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.hasKerberosCredentials from open-source projects.

Prototype

public boolean hasKerberosCredentials() 

Document

Checks if the user is logged in using Kerberos.
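
Before the project examples, here is a minimal, self-contained sketch of the typical pattern (the class name and messages are illustrative, not taken from any project below): verify that the current user holds Kerberos credentials before talking to a secured cluster.

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class KerberosCredentialCheck {
    public static void main(String[] args) throws IOException {
        // getCurrentUser() reflects any doAs() context; getLoginUser() would
        // return the process-wide login instead
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        if (UserGroupInformation.isSecurityEnabled() && !ugi.hasKerberosCredentials()) {
            // no TGT is available from a keytab login or a ticket cache
            throw new IllegalStateException("Kerberos is enabled but " + ugi.getUserName()
                    + " has no Kerberos credentials; run kinit or use loginUserFromKeytab()");
        }
    }
}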

Usage

From source file: org.apache.accumulo.server.AccumuloServerContext.java

License: Apache License

/**
 * A "client-side" assertion for servers to validate that they are logged in as the expected user, per the configuration, before performing any RPC
 */
// Should be private, but package-protected so EasyMock will work
void enforceKerberosLogin() {
    final AccumuloConfiguration conf = confFactory.getSiteConfiguration();
    // Unwrap _HOST into the FQDN to make the kerberos principal we'll compare against
    final String kerberosPrincipal = SecurityUtil
            .getServerPrincipal(conf.get(Property.GENERAL_KERBEROS_PRINCIPAL));
    UserGroupInformation loginUser;
    try {
        // The system user should be logged in via keytab when the process is started, not the currentUser() like KerberosToken
        loginUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new RuntimeException("Could not get login user", e);
    }

    checkArgument(loginUser.hasKerberosCredentials(), "Server does not have Kerberos credentials");
    checkArgument(kerberosPrincipal.equals(loginUser.getUserName()),
            "Expected login user to be " + kerberosPrincipal + " but was " + loginUser.getUserName());
}
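
The "_HOST unwrapping" mentioned in the comment above follows the standard Hadoop convention; Accumulo's SecurityUtil wraps the same idea. With Hadoop's own org.apache.hadoop.security.SecurityUtil the equivalent call looks roughly like this, using an illustrative principal and realm:

// expands "accumulo/_HOST@EXAMPLE.COM" into "accumulo/<local-fqdn>@EXAMPLE.COM";
// both calls throw IOException, which real code must handle or declare
String hostname = java.net.InetAddress.getLocalHost().getCanonicalHostName();
String serverPrincipal = org.apache.hadoop.security.SecurityUtil
        .getServerPrincipal("accumulo/_HOST@EXAMPLE.COM", hostname);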

From source file: org.apache.accumulo.server.init.Initialize.java

License: Apache License

private boolean initialize(Opts opts, String instanceNamePath, VolumeManager fs, String rootUser) {

    UUID uuid = UUID.randomUUID();
    // the actual disk locations of the root table and tablets
    String[] configuredVolumes = VolumeConfiguration.getVolumeUris(SiteConfiguration.getInstance());
    final String rootTabletDir = new Path(
            fs.choose(Optional.<String>empty(), configuredVolumes) + Path.SEPARATOR + ServerConstants.TABLE_DIR
                    + Path.SEPARATOR + RootTable.ID + RootTable.ROOT_TABLET_LOCATION).toString();

    try {
        initZooKeeper(opts, uuid.toString(), instanceNamePath, rootTabletDir);
    } catch (Exception e) {
        log.error("FATAL: Failed to initialize zookeeper", e);
        return false;
    }

    try {
        initFileSystem(opts, fs, uuid, rootTabletDir);
    } catch (Exception e) {
        log.error("FATAL Failed to initialize filesystem", e);

        if (SiteConfiguration.getInstance().get(Property.INSTANCE_VOLUMES).trim().equals("")) {
            Configuration fsConf = CachedConfiguration.getInstance();

            final String defaultFsUri = "file:///";
            String fsDefaultName = fsConf.get("fs.default.name", defaultFsUri),
                    fsDefaultFS = fsConf.get("fs.defaultFS", defaultFsUri);

            // Try to determine when we couldn't find an appropriate core-site.xml on the classpath
            if (defaultFsUri.equals(fsDefaultName) && defaultFsUri.equals(fsDefaultFS)) {
                log.error("FATAL: Default filesystem value ('fs.defaultFS' or 'fs.default.name') of '"
                        + defaultFsUri + "' was found in the Hadoop configuration");
                log.error(
                        "FATAL: Please ensure that the Hadoop core-site.xml is on the classpath using 'general.classpaths' in accumulo-site.xml");
            }
        }

        return false;
    }

    final ServerConfigurationFactory confFactory = new ServerConfigurationFactory(
            HdfsZooInstance.getInstance());

    // When we're using Kerberos authentication, we need valid credentials to perform initialization. If the user provided some, use them.
    // If they did not, fall back to the credentials present in accumulo-site.xml that the servers will use themselves.
    try {
        final SiteConfiguration siteConf = confFactory.getSiteConfiguration();
        if (siteConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
            final UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
            // We don't have any valid creds to talk to HDFS
            if (!ugi.hasKerberosCredentials()) {
                final String accumuloKeytab = siteConf.get(Property.GENERAL_KERBEROS_KEYTAB),
                        accumuloPrincipal = siteConf.get(Property.GENERAL_KERBEROS_PRINCIPAL);

                // Fail if the site configuration doesn't contain appropriate credentials to login as servers
                if (StringUtils.isBlank(accumuloKeytab) || StringUtils.isBlank(accumuloPrincipal)) {
                    log.error(
                            "FATAL: No Kerberos credentials provided, and Accumulo is not properly configured for server login");
                    return false;
                }

                log.info("Logging in as " + accumuloPrincipal + " with " + accumuloKeytab);

                // Login using the keytab as the 'accumulo' user
                UserGroupInformation.loginUserFromKeytab(accumuloPrincipal, accumuloKeytab);
            }
        }
    } catch (IOException e) {
        log.error("FATAL: Failed to get the Kerberos user", e);
        return false;
    }

    try {
        AccumuloServerContext context = new AccumuloServerContext(confFactory);
        initSecurity(context, opts, uuid.toString(), rootUser);
    } catch (Exception e) {
        log.error("FATAL: Failed to initialize security", e);
        return false;
    }
    return true;
}

From source file: org.apache.accumulo.server.ServerContext.java

License: Apache License

/**
 * A "client-side" assertion for servers to validate that they are logged in as the expected user,
 * per the configuration, before performing any RPC
 */
// Should be private, but package-protected so EasyMock will work
void enforceKerberosLogin() {
    final AccumuloConfiguration conf = getServerConfFactory().getSiteConfiguration();
    // Unwrap _HOST into the FQDN to make the kerberos principal we'll compare against
    final String kerberosPrincipal = SecurityUtil
            .getServerPrincipal(conf.get(Property.GENERAL_KERBEROS_PRINCIPAL));
    UserGroupInformation loginUser;
    try {
        // The system user should be logged in via keytab when the process is started, not the
        // currentUser() like KerberosToken
        loginUser = UserGroupInformation.getLoginUser();
    } catch (IOException e) {
        throw new RuntimeException("Could not get login user", e);
    }

    checkArgument(loginUser.hasKerberosCredentials(), "Server does not have Kerberos credentials");
    checkArgument(kerberosPrincipal.equals(loginUser.getUserName()),
            "Expected login user to be " + kerberosPrincipal + " but was " + loginUser.getUserName());
}

From source file: org.apache.accumulo.test.randomwalk.multitable.CopyTool.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Job job = Job.getInstance(getConf(), this.getClass().getSimpleName());
    job.setJarByClass(this.getClass());

    if (job.getJar() == null) {
        log.error("M/R requires a jar file!  Run mvn package.");
        return 1;
    }

    ClientConfiguration clientConf = new ClientConfiguration().withInstance(args[3]).withZkHosts(args[4]);

    job.setInputFormatClass(AccumuloInputFormat.class);
    AccumuloInputFormat.setInputTableName(job, args[2]);
    AccumuloInputFormat.setScanAuthorizations(job, Authorizations.EMPTY);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConf);

    final String principal;
    final AuthenticationToken token;
    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        // Use the Kerberos creds to request a DelegationToken for MapReduce to use
        // We could use the specified keytab (args[1]), but we're already logged in and don't need to, so we can just use the current user
        KerberosToken kt = new KerberosToken();
        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            // Get the principal via UGI
            principal = user.getUserName();

            // Connector w/ the Kerberos creds
            ZooKeeperInstance inst = new ZooKeeperInstance(clientConf);
            Connector conn = inst.getConnector(principal, kt);

            // Do the explicit check to see if the user has the permission to get a delegation token
            if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
                    SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                log.error(principal + " doesn't have the " + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
                        + " SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                        + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.");
                throw new IllegalStateException(
                        conn.whoami() + " does not have permission to obtain a delegation token");
            }

            // Fetch a delegation token from Accumulo
            token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());

        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    } else {
        // Simple principal + password
        principal = args[0];
        token = new PasswordToken(args[1]);
    }

    AccumuloInputFormat.setConnectorInfo(job, principal, token);
    AccumuloOutputFormat.setConnectorInfo(job, principal, token);

    job.setMapperClass(SeqMapClass.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);

    job.setNumReduceTasks(0);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setDefaultTableName(job, args[5]);
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConf);

    job.waitForCompletion(true);
    return job.isSuccessful() ? 0 : 1;
}

From source file: org.apache.accumulo.test.randomwalk.sequential.MapRedVerifyTool.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Job job = Job.getInstance(getConf(), this.getClass().getSimpleName());
    job.setJarByClass(this.getClass());

    if (job.getJar() == null) {
        log.error("M/R requires a jar file!  Run mvn package.");
        return 1;
    }

    ClientConfiguration clientConf = ClientConfiguration.loadDefault().withInstance(args[3])
            .withZkHosts(args[4]);

    AccumuloInputFormat.setInputTableName(job, args[2]);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConf);
    AccumuloOutputFormat.setDefaultTableName(job, args[5]);
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConf);

    job.setInputFormatClass(AccumuloInputFormat.class);
    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        // Better be logged in
        KerberosToken token = new KerberosToken();
        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            String newPrincipal = user.getUserName();

            ZooKeeperInstance inst = new ZooKeeperInstance(clientConf);
            Connector conn = inst.getConnector(newPrincipal, token);

            // Do the explicit check to see if the user has the permission to get a delegation token
            if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
                    SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                log.error(newPrincipal + " doesn't have the " + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
                        + " SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                        + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.");
                throw new IllegalStateException(
                        conn.whoami() + " does not have permission to obtain a delegation token");
            }

            // Fetch a delegation token from Accumulo
            AuthenticationToken dt = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());

            // Set the delegation token instead of the kerberos token
            AccumuloInputFormat.setConnectorInfo(job, newPrincipal, dt);
            AccumuloOutputFormat.setConnectorInfo(job, newPrincipal, dt);
        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    } else {
        AccumuloInputFormat.setConnectorInfo(job, args[0], new PasswordToken(args[1]));
        AccumuloOutputFormat.setConnectorInfo(job, args[0], new PasswordToken(args[1]));
    }

    job.setMapperClass(SeqMapClass.class);
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(IntWritable.class);

    job.setReducerClass(SeqReduceClass.class);
    job.setNumReduceTasks(1);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.setCreateTables(job, true);

    job.waitForCompletion(true);
    return job.isSuccessful() ? 0 : 1;
}

From source file: org.apache.accumulo.testing.core.randomwalk.multitable.CopyTool.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Job job = Job.getInstance(getConf(), this.getClass().getSimpleName());
    job.setJarByClass(this.getClass());

    if (job.getJar() == null) {
        log.error("M/R requires a jar file!  Run mvn package.");
        return 1;
    }

    ClientConfiguration clientConf = new ClientConfiguration().withInstance(args[3]).withZkHosts(args[4]);

    job.setInputFormatClass(AccumuloInputFormat.class);
    AccumuloInputFormat.setInputTableName(job, args[2]);
    AccumuloInputFormat.setScanAuthorizations(job, Authorizations.EMPTY);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConf);

    final String principal;
    final AuthenticationToken token;
    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        // Use the Kerberos creds to request a DelegationToken for MapReduce
        // to use
        // We could use the specified keytab (args[1]), but we're already
        // logged in and don't need to, so we can just use the current user
        KerberosToken kt = new KerberosToken();
        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            // Get the principal via UGI
            principal = user.getUserName();

            // Connector w/ the Kerberos creds
            ZooKeeperInstance inst = new ZooKeeperInstance(clientConf);
            Connector conn = inst.getConnector(principal, kt);

            // Do the explicit check to see if the user has the permission
            // to get a delegation token
            if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
                    SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                log.error(principal + " doesn't have the " + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
                        + " SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                        + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.");
                throw new IllegalStateException(
                        conn.whoami() + " does not have permission to obtain a delegation token");
            }

            // Fetch a delegation token from Accumulo
            token = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());

        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    } else {
        // Simple principal + password
        principal = args[0];
        token = new PasswordToken(args[1]);
    }

    AccumuloInputFormat.setConnectorInfo(job, principal, token);
    AccumuloOutputFormat.setConnectorInfo(job, principal, token);

    job.setMapperClass(SeqMapClass.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(Mutation.class);

    job.setNumReduceTasks(0);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.setCreateTables(job, true);
    AccumuloOutputFormat.setDefaultTableName(job, args[5]);
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConf);

    job.waitForCompletion(true);
    return job.isSuccessful() ? 0 : 1;
}

From source file: org.apache.accumulo.testing.core.randomwalk.sequential.MapRedVerifyTool.java

License: Apache License

@Override
public int run(String[] args) throws Exception {
    Job job = Job.getInstance(getConf(), this.getClass().getSimpleName());
    job.setJarByClass(this.getClass());

    if (job.getJar() == null) {
        log.error("M/R requires a jar file!  Run mvn package.");
        return 1;
    }

    ClientConfiguration clientConf = ClientConfiguration.loadDefault().withInstance(args[3])
            .withZkHosts(args[4]);

    AccumuloInputFormat.setInputTableName(job, args[2]);
    AccumuloInputFormat.setZooKeeperInstance(job, clientConf);
    AccumuloOutputFormat.setDefaultTableName(job, args[5]);
    AccumuloOutputFormat.setZooKeeperInstance(job, clientConf);

    job.setInputFormatClass(AccumuloInputFormat.class);
    if (clientConf.getBoolean(ClientProperty.INSTANCE_RPC_SASL_ENABLED.getKey(), false)) {
        // Better be logged in
        KerberosToken token = new KerberosToken();
        try {
            UserGroupInformation user = UserGroupInformation.getCurrentUser();
            if (!user.hasKerberosCredentials()) {
                throw new IllegalStateException("Expected current user to have Kerberos credentials");
            }

            String newPrincipal = user.getUserName();

            ZooKeeperInstance inst = new ZooKeeperInstance(clientConf);
            Connector conn = inst.getConnector(newPrincipal, token);

            // Do the explicit check to see if the user has the permission
            // to get a delegation token
            if (!conn.securityOperations().hasSystemPermission(conn.whoami(),
                    SystemPermission.OBTAIN_DELEGATION_TOKEN)) {
                log.error(newPrincipal + " doesn't have the " + SystemPermission.OBTAIN_DELEGATION_TOKEN.name()
                        + " SystemPermission neccesary to obtain a delegation token. MapReduce tasks cannot automatically use the client's"
                        + " credentials on remote servers. Delegation tokens provide a means to run MapReduce without distributing the user's credentials.");
                throw new IllegalStateException(
                        conn.whoami() + " does not have permission to obtain a delegation token");
            }

            // Fetch a delegation token from Accumulo
            AuthenticationToken dt = conn.securityOperations().getDelegationToken(new DelegationTokenConfig());

            // Set the delegation token instead of the kerberos token
            AccumuloInputFormat.setConnectorInfo(job, newPrincipal, dt);
            AccumuloOutputFormat.setConnectorInfo(job, newPrincipal, dt);
        } catch (Exception e) {
            final String msg = "Failed to acquire DelegationToken for use with MapReduce";
            log.error(msg, e);
            throw new RuntimeException(msg, e);
        }
    } else {
        AccumuloInputFormat.setConnectorInfo(job, args[0], new PasswordToken(args[1]));
        AccumuloOutputFormat.setConnectorInfo(job, args[0], new PasswordToken(args[1]));
    }

    job.setMapperClass(SeqMapClass.class);
    job.setMapOutputKeyClass(NullWritable.class);
    job.setMapOutputValueClass(IntWritable.class);

    job.setReducerClass(SeqReduceClass.class);
    job.setNumReduceTasks(1);

    job.setOutputFormatClass(AccumuloOutputFormat.class);
    AccumuloOutputFormat.setCreateTables(job, true);

    job.waitForCompletion(true);
    return job.isSuccessful() ? 0 : 1;
}

From source file: org.apache.flink.runtime.security.SecurityContext.java

License: Apache License

public static void install(SecurityConfiguration config) throws Exception {

    // perform static initialization of UGI, JAAS
    if (installedContext != null) {
        LOG.warn("overriding previous security context");
    }

    // establish the JAAS config
    JaasConfiguration jaasConfig = new JaasConfiguration(config.keytab, config.principal);
    javax.security.auth.login.Configuration.setConfiguration(jaasConfig);

    populateSystemSecurityProperties(config.flinkConf);

    // establish the UGI login user
    UserGroupInformation.setConfiguration(config.hadoopConf);

    UserGroupInformation loginUser;

    if (UserGroupInformation.isSecurityEnabled() && config.keytab != null
            && !StringUtils.isBlank(config.principal)) {
        String keytabPath = (new File(config.keytab)).getAbsolutePath();

        UserGroupInformation.loginUserFromKeytab(config.principal, keytabPath);

        loginUser = UserGroupInformation.getLoginUser();

        // supplement with any available tokens
        String fileLocation = System.getenv(UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION);
        if (fileLocation != null) {
            /*
             * Use reflection API since the API semantics are not available in Hadoop1 profile. Below APIs are
             * used in the context of reading the stored tokens from UGI.
             * Credentials cred = Credentials.readTokenStorageFile(new File(fileLocation), config.hadoopConf);
             * loginUser.addCredentials(cred);
            */
            try {
                Method readTokenStorageFileMethod = Credentials.class.getMethod("readTokenStorageFile",
                        File.class, org.apache.hadoop.conf.Configuration.class);
                Credentials cred = (Credentials) readTokenStorageFileMethod.invoke(null, new File(fileLocation),
                        config.hadoopConf);
                Method addCredentialsMethod = UserGroupInformation.class.getMethod("addCredentials",
                        Credentials.class);
                addCredentialsMethod.invoke(loginUser, cred);
            } catch (NoSuchMethodException e) {
                LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
            }
        }
    } else {
        // login with current user credentials (e.g. ticket cache)
        try {
            //Use reflection API to get the login user object
            //UserGroupInformation.loginUserFromSubject(null);
            Method loginUserFromSubjectMethod = UserGroupInformation.class.getMethod("loginUserFromSubject",
                    Subject.class);
            Subject subject = null;
            loginUserFromSubjectMethod.invoke(null, subject);
        } catch (NoSuchMethodException e) {
            LOG.warn("Could not find method implementations in the shaded jar. Exception: {}", e);
        }

        loginUser = UserGroupInformation.getLoginUser();
        // note that the stored tokens are read automatically
    }

    boolean delegationToken = false;
    final Text HDFS_DELEGATION_KIND = new Text("HDFS_DELEGATION_TOKEN");
    Collection<Token<? extends TokenIdentifier>> usrTok = loginUser.getTokens();
    for (Token<? extends TokenIdentifier> token : usrTok) {
        final Text id = new Text(token.getIdentifier());
        LOG.debug("Found user token " + id + " with " + token);
        if (token.getKind().equals(HDFS_DELEGATION_KIND)) {
            delegationToken = true;
        }
    }

    if (UserGroupInformation.isSecurityEnabled() && !loginUser.hasKerberosCredentials()) {
        //throw an error in non-yarn deployment if kerberos cache is not available
        if (!delegationToken) {
            LOG.error("Hadoop Security is enabled but current login user does not have Kerberos Credentials");
            throw new RuntimeException(
                    "Hadoop Security is enabled but current login user does not have Kerberos Credentials");
        }
    }

    installedContext = new SecurityContext(loginUser);
}

From source file: org.apache.flink.runtime.security.SecurityUtils.java

License: Apache License

public static <T> T runSecured(final FlinkSecuredRunner<T> runner) throws Exception {
    UserGroupInformation.setConfiguration(hdConf);
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    if (!ugi.hasKerberosCredentials()) {
        LOG.error("Security is enabled but no Kerberos credentials have been found. "
                + "You may authenticate using the kinit command.");
    }
    return ugi.doAs(new PrivilegedExceptionAction<T>() {
        @Override
        public T run() throws Exception {
            return runner.run();
        }
    });
}
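
For context, a hypothetical caller wraps its work in a FlinkSecuredRunner so that it runs under the Kerberos login. This is a sketch inferred from the signature above; the nested interface with a single run() method is assumed from the snippet, not verified against any particular Flink release:

Integer exitCode = SecurityUtils.runSecured(new SecurityUtils.FlinkSecuredRunner<Integer>() {
    @Override
    public Integer run() throws Exception {
        // everything here executes inside ugi.doAs() under the Kerberos login
        return 0;
    }
});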

From source file: org.apache.flink.yarn.AbstractYarnClusterDescriptor.java

License: Apache License

@Override
public YarnClusterClient deploy() {
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            // note: UGI::hasKerberosCredentials inaccurately reports false
            // for logins based on a keytab (fixed in Hadoop 2.6.1, see HADOOP-10786),
            // so we check only in ticket cache scenario.
            boolean useTicketCache = flinkConfiguration
                    .getBoolean(SecurityOptions.KERBEROS_LOGIN_USETICKETCACHE);

            UserGroupInformation loginUser = UserGroupInformation.getCurrentUser();
            if (loginUser.getAuthenticationMethod() == UserGroupInformation.AuthenticationMethod.KERBEROS
                    && useTicketCache && !loginUser.hasKerberosCredentials()) {
                LOG.error(
                        "Hadoop security with Kerberos is enabled but the login user does not have Kerberos credentials");
                throw new RuntimeException("Hadoop security with Kerberos is enabled but the login user "
                        + "does not have Kerberos credentials");
            }
        }
        return deployInternal();
    } catch (Exception e) {
        throw new RuntimeException("Couldn't deploy Yarn cluster", e);
    }
}