Example usage for org.apache.hadoop.security UserGroupInformation loginUserFromKeytabAndReturnUGI

Introduction

This page collects example usages of org.apache.hadoop.security.UserGroupInformation.loginUserFromKeytabAndReturnUGI, drawn from open source projects.

Prototype

public static UserGroupInformation loginUserFromKeytabAndReturnUGI(String user, String path)
        throws IOException 

Document

Log a user in from a keytab file.
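
Before the project-specific examples, here is a minimal, self-contained sketch of the typical pattern. It is not taken from any of the projects below; the principal name, keytab path, and the file-system check inside doAs are placeholder assumptions. Unlike loginUserFromKeytab, this method returns a fresh UserGroupInformation without replacing the static login user, so the returned UGI is normally used with doAs.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class KeytabLoginExample {
    public static void main(String[] args) throws Exception {
        // Kerberos must be enabled before attempting the login
        final Configuration conf = new Configuration();
        conf.set("hadoop.security.authentication", "kerberos");
        UserGroupInformation.setConfiguration(conf);

        // Placeholder principal and keytab path -- adjust for your environment
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(
                "service/host@EXAMPLE.COM", "/etc/security/keytabs/service.keytab");

        // Run Hadoop operations as the logged-in principal without touching the login user
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            @Override
            public Void run() throws Exception {
                try (FileSystem fs = FileSystem.get(conf)) {
                    System.out.println("Can read / : " + fs.exists(new Path("/")));
                }
                return null;
            }
        });
    }
}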

Usage

From source file: org.apache.accumulo.test.functional.KerberosIT.java

License: Apache License

@Test
public void testDelegationTokenWithReducedLifetime() throws Throwable {
    // Login as the "root" user
    UserGroupInformation root = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(),
            rootUser.getKeytab().getAbsolutePath());
    log.info("Logged in as {}", rootUser.getPrincipal());

    // As the "root" user, open up the connection and get a delegation token
    final AuthenticationToken dt = root.doAs(new PrivilegedExceptionAction<AuthenticationToken>() {
        @Override
        public AuthenticationToken run() throws Exception {
            Connector conn = mac.getConnector(rootUser.getPrincipal(), new KerberosToken());
            log.info("Created connector as {}", rootUser.getPrincipal());
            assertEquals(rootUser.getPrincipal(), conn.whoami());

            return conn.securityOperations()
                    .getDelegationToken(new DelegationTokenConfig().setTokenLifetime(5, TimeUnit.MINUTES));
        }
    });

    AuthenticationTokenIdentifier identifier = ((DelegationTokenImpl) dt).getIdentifier();
    assertTrue("Expected identifier to expire in no more than 5 minutes: " + identifier,
            identifier.getExpirationDate() - identifier.getIssueDate() <= (5 * 60 * 1000));
}

From source file: org.apache.accumulo.test.functional.KerberosProxyIT.java

License: Apache License

@Before
public void startMac() throws Exception {
    MiniClusterHarness harness = new MiniClusterHarness();
    mac = harness.create(getClass().getName(), testName.getMethodName(), new PasswordToken("unused"),
            new MiniClusterConfigurationCallback() {

                @Override
                public void configureMiniCluster(MiniAccumuloConfigImpl cfg, Configuration coreSite) {
                    cfg.setNumTservers(1);
                    Map<String, String> siteCfg = cfg.getSiteConfig();
                    // Allow the proxy to impersonate the client user, but no one else
                    siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_USER_IMPERSONATION.getKey(),
                            proxyPrincipal + ":" + kdc.getRootUser().getPrincipal());
                    siteCfg.put(Property.INSTANCE_RPC_SASL_ALLOWED_HOST_IMPERSONATION.getKey(), "*");
                    cfg.setSiteConfig(siteCfg);
                }

            }, kdc);

    mac.start();
    MiniAccumuloConfigImpl cfg = mac.getConfig();

    // Generate Proxy configuration and start the proxy
    proxyProcess = startProxy(cfg);

    // Enable Kerberos auth
    Configuration conf = new Configuration(false);
    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
    UserGroupInformation.setConfiguration(conf);

    boolean success = false;
    ClusterUser rootUser = kdc.getRootUser();
    // Rely on the junit timeout rule
    while (!success) {
        UserGroupInformation ugi;
        try {
            ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(),
                    rootUser.getKeytab().getAbsolutePath());
        } catch (IOException ex) {
            log.info("Login as root is failing", ex);
            Thread.sleep(3000);
            continue;
        }

        TSocket socket = new TSocket(hostname, proxyPort);
        log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
        TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
                Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);

        final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);

        try {
            // UGI transport will perform the doAs for us
            ugiTransport.open();
            success = true;
        } catch (TTransportException e) {
            Throwable cause = e.getCause();
            if (null != cause && cause instanceof ConnectException) {
                log.info("Proxy not yet up, waiting");
                Thread.sleep(3000);
                proxyProcess = checkProxyAndRestart(proxyProcess, cfg);
                continue;
            }
        } finally {
            if (null != ugiTransport) {
                ugiTransport.close();
            }
        }
    }

    assertTrue("Failed to connect to the proxy repeatedly", success);
}

From source file: org.apache.accumulo.test.functional.KerberosProxyIT.java

License: Apache License

@Test
public void testProxyClient() throws Exception {
    ClusterUser rootUser = kdc.getRootUser();
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(),
            rootUser.getKeytab().getAbsolutePath());

    TSocket socket = new TSocket(hostname, proxyPort);
    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);
    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
            Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);

    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);

    // UGI transport will perform the doAs for us
    ugiTransport.open();

    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));

    // Will fail if the proxy cannot impersonate the client
    ByteBuffer login = client.login(rootUser.getPrincipal(), Collections.<String, String>emptyMap());

    // For all of the below actions, the proxy user doesn't have permission to do any of them, but the client user does.
    // The fact that any of them actually run tells us that impersonation is working.

    // Create a table
    String table = "table";
    if (!client.tableExists(login, table)) {
        client.createTable(login, table, true, TimeType.MILLIS);
    }

    // Write two records to the table
    String writer = client.createWriter(login, table, new WriterOptions());
    Map<ByteBuffer, List<ColumnUpdate>> updates = new HashMap<>();
    ColumnUpdate update = new ColumnUpdate(ByteBuffer.wrap("cf1".getBytes(UTF_8)),
            ByteBuffer.wrap("cq1".getBytes(UTF_8)));
    update.setValue(ByteBuffer.wrap("value1".getBytes(UTF_8)));
    updates.put(ByteBuffer.wrap("row1".getBytes(UTF_8)), Collections.<ColumnUpdate>singletonList(update));
    update = new ColumnUpdate(ByteBuffer.wrap("cf2".getBytes(UTF_8)), ByteBuffer.wrap("cq2".getBytes(UTF_8)));
    update.setValue(ByteBuffer.wrap("value2".getBytes(UTF_8)));
    updates.put(ByteBuffer.wrap("row2".getBytes(UTF_8)), Collections.<ColumnUpdate>singletonList(update));
    client.update(writer, updates);

    // Flush and close the writer
    client.flush(writer);
    client.closeWriter(writer);

    // Open a scanner to the table
    String scanner = client.createScanner(login, table, new ScanOptions());
    ScanResult results = client.nextK(scanner, 10);
    assertEquals(2, results.getResults().size());

    // Check the first key-value
    KeyValue kv = results.getResults().get(0);
    Key k = kv.key;
    ByteBuffer v = kv.value;
    assertEquals(ByteBuffer.wrap("row1".getBytes(UTF_8)), k.row);
    assertEquals(ByteBuffer.wrap("cf1".getBytes(UTF_8)), k.colFamily);
    assertEquals(ByteBuffer.wrap("cq1".getBytes(UTF_8)), k.colQualifier);
    assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
    assertEquals(ByteBuffer.wrap("value1".getBytes(UTF_8)), v);

    // And then the second
    kv = results.getResults().get(1);
    k = kv.key;
    v = kv.value;
    assertEquals(ByteBuffer.wrap("row2".getBytes(UTF_8)), k.row);
    assertEquals(ByteBuffer.wrap("cf2".getBytes(UTF_8)), k.colFamily);
    assertEquals(ByteBuffer.wrap("cq2".getBytes(UTF_8)), k.colQualifier);
    assertEquals(ByteBuffer.wrap(new byte[0]), k.colVisibility);
    assertEquals(ByteBuffer.wrap("value2".getBytes(UTF_8)), v);

    // Close the scanner
    client.closeScanner(scanner);

    ugiTransport.close();
}

From source file: org.apache.accumulo.test.functional.KerberosProxyIT.java

License: Apache License

@Test
public void testDisallowedClientForImpersonation() throws Exception {
    String user = testName.getMethodName();
    File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
    kdc.createPrincipal(keytab, user);

    // Login as the new user
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user,
            keytab.getAbsolutePath());

    log.info("Logged in as " + ugi);

    // Expect an AccumuloSecurityException
    thrown.expect(AccumuloSecurityException.class);
    // Error msg would look like:
    //
    // org.apache.accumulo.core.client.AccumuloSecurityException: Error BAD_CREDENTIALS for user Principal in credentials object should match kerberos
    // principal.
    // Expected 'proxy/hw10447.local@EXAMPLE.COM' but was 'testDisallowedClientForImpersonation@EXAMPLE.COM' - Username or Password is Invalid)
    thrown.expect(new ThriftExceptionMatchesPattern(".*Error BAD_CREDENTIALS.*"));
    thrown.expect(new ThriftExceptionMatchesPattern(
            ".*Expected '" + proxyPrincipal + "' but was '" + kdc.qualifyUser(user) + "'.*"));

    TSocket socket = new TSocket(hostname, proxyPort);
    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);

    // Should fail to open the transport
    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
            Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);

    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);

    // UGI transport will perform the doAs for us
    ugiTransport.open();

    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));

    // Will fail because the proxy can't impersonate this user (per the site configuration)
    try {
        client.login(kdc.qualifyUser(user), Collections.<String, String>emptyMap());
    } finally {
        if (null != ugiTransport) {
            ugiTransport.close();
        }
    }
}

From source file: org.apache.accumulo.test.functional.KerberosProxyIT.java

License: Apache License

@Test
public void testMismatchPrincipals() throws Exception {
    ClusterUser rootUser = kdc.getRootUser();
    // Should get an AccumuloSecurityException and the given message
    thrown.expect(AccumuloSecurityException.class);
    thrown.expect(new ThriftExceptionMatchesPattern(ProxyServer.RPC_ACCUMULO_PRINCIPAL_MISMATCH_MSG));

    // Make a new user
    String user = testName.getMethodName();
    File keytab = new File(kdc.getKeytabDir(), user + ".keytab");
    kdc.createPrincipal(keytab, user);

    // Login as the new user
    UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(user,
            keytab.getAbsolutePath());

    log.info("Logged in as " + ugi);

    TSocket socket = new TSocket(hostname, proxyPort);
    log.info("Connecting to proxy with server primary '" + proxyPrimary + "' running on " + hostname);

    // Should fail to open the transport
    TSaslClientTransport transport = new TSaslClientTransport("GSSAPI", null, proxyPrimary, hostname,
            Collections.singletonMap("javax.security.sasl.qop", "auth"), null, socket);

    final UGIAssumingTransport ugiTransport = new UGIAssumingTransport(transport, ugi);

    // UGI transport will perform the doAs for us
    ugiTransport.open();

    AccumuloProxy.Client.Factory factory = new AccumuloProxy.Client.Factory();
    Client client = factory.getClient(new TCompactProtocol(ugiTransport), new TCompactProtocol(ugiTransport));

    // The proxy needs to recognize that the requested principal isn't the same as the SASL principal and fail
    // Accumulo should let this through -- we need to rely on the proxy to dump me before talking to accumulo
    try {
        client.login(rootUser.getPrincipal(), Collections.<String, String>emptyMap());
    } finally {
        if (null != ugiTransport) {
            ugiTransport.close();
        }
    }
}

From source file: org.apache.accumulo.test.replication.KerberosReplicationIT.java

License: Apache License

@Test
public void dataReplicatedToCorrectTable() throws Exception {
    // Login as the root user
    final UserGroupInformation ugi = UserGroupInformation
            .loginUserFromKeytabAndReturnUGI(rootUser.getPrincipal(), rootUser.getKeytab().toURI().toString());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            log.info("testing {}", ugi);
            final KerberosToken token = new KerberosToken();
            final Connector primaryConn = primary.getConnector(rootUser.getPrincipal(), token);
            final Connector peerConn = peer.getConnector(rootUser.getPrincipal(), token);

            ClusterUser replicationUser = kdc.getClientPrincipal(0);

            // Create user for replication to the peer
            peerConn.securityOperations().createLocalUser(replicationUser.getPrincipal(), null);

            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_USER.getKey() + PEER_NAME,
                    replicationUser.getPrincipal());
            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEER_KEYTAB.getKey() + PEER_NAME,
                    replicationUser.getKeytab().getAbsolutePath());

            // ...peer = AccumuloReplicaSystem,instanceName,zookeepers
            primaryConn.instanceOperations().setProperty(Property.REPLICATION_PEERS.getKey() + PEER_NAME,
                    ReplicaSystemFactory.getPeerConfigurationValue(AccumuloReplicaSystem.class,
                            AccumuloReplicaSystem.buildConfiguration(peerConn.getInstance().getInstanceName(),
                                    peerConn.getInstance().getZooKeepers())));

            String primaryTable1 = "primary", peerTable1 = "peer";

            // Create tables
            primaryConn.tableOperations().create(primaryTable1);
            String masterTableId1 = primaryConn.tableOperations().tableIdMap().get(primaryTable1);
            Assert.assertNotNull(masterTableId1);

            peerConn.tableOperations().create(peerTable1);
            String peerTableId1 = peerConn.tableOperations().tableIdMap().get(peerTable1);
            Assert.assertNotNull(peerTableId1);

            // Grant write permission
            peerConn.securityOperations().grantTablePermission(replicationUser.getPrincipal(), peerTable1,
                    TablePermission.WRITE);

            // Replicate this table to the peerClusterName in a table with the peerTableId table id
            primaryConn.tableOperations().setProperty(primaryTable1, Property.TABLE_REPLICATION.getKey(),
                    "true");
            primaryConn.tableOperations().setProperty(primaryTable1,
                    Property.TABLE_REPLICATION_TARGET.getKey() + PEER_NAME, peerTableId1);

            // Write some data to table1
            BatchWriter bw = primaryConn.createBatchWriter(primaryTable1, new BatchWriterConfig());
            long masterTable1Records = 0L;
            for (int rows = 0; rows < 2500; rows++) {
                Mutation m = new Mutation(primaryTable1 + rows);
                for (int cols = 0; cols < 100; cols++) {
                    String value = Integer.toString(cols);
                    m.put(value, "", value);
                    masterTable1Records++;
                }
                bw.addMutation(m);
            }

            bw.close();

            log.info("Wrote all data to primary cluster");

            Set<String> filesFor1 = primaryConn.replicationOperations().referencedFiles(primaryTable1);

            // Restart the tserver to force a close on the WAL
            for (ProcessReference proc : primary.getProcesses().get(ServerType.TABLET_SERVER)) {
                primary.killProcess(ServerType.TABLET_SERVER, proc);
            }
            primary.exec(TabletServer.class);

            log.info("Restarted the tserver");

            // Read the data -- the tserver is back up and running and tablets are assigned
            Iterators.size(primaryConn.createScanner(primaryTable1, Authorizations.EMPTY).iterator());

            // Wait for both tables to be replicated
            log.info("Waiting for {} for {}", filesFor1, primaryTable1);
            primaryConn.replicationOperations().drain(primaryTable1, filesFor1);

            long countTable = 0L;
            for (Entry<Key, Value> entry : peerConn.createScanner(peerTable1, Authorizations.EMPTY)) {
                countTable++;
                Assert.assertTrue(
                        "Found unexpected key-value" + entry.getKey().toStringNoTruncate() + " "
                                + entry.getValue(),
                        entry.getKey().getRow().toString().startsWith(primaryTable1));
            }

            log.info("Found {} records in {}", countTable, peerTable1);
            Assert.assertEquals(masterTable1Records, countTable);

            return null;
        }
    });
}

From source file: org.apache.accumulo.tserver.replication.AccumuloReplicaSystem.java

License: Apache License

@Override
public Status replicate(final Path p, final Status status, final ReplicationTarget target,
        final ReplicaSystemHelper helper) {
    final Instance localInstance = HdfsZooInstance.getInstance();
    final AccumuloConfiguration localConf = new ServerConfigurationFactory(localInstance).getConfiguration();

    final String principal = getPrincipal(localConf, target);
    final File keytab;
    final String password;
    if (localConf.getBoolean(Property.INSTANCE_RPC_SASL_ENABLED)) {
        String keytabPath = getKeytab(localConf, target);
        keytab = new File(keytabPath);
        if (!keytab.exists() || !keytab.isFile()) {
            log.error("{} is not a regular file. Cannot login to replicate", keytabPath);
            return status;
        }
        password = null;
    } else {
        keytab = null;
        password = getPassword(localConf, target);
    }

    if (null != keytab) {
        try {
            final UserGroupInformation accumuloUgi = UserGroupInformation.getCurrentUser();
            // Get a UGI with the principal + keytab
            UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal,
                    keytab.getAbsolutePath());

            // Run inside a doAs to avoid nuking the Tserver's user
            return ugi.doAs(new PrivilegedAction<Status>() {
                @Override
                public Status run() {
                    KerberosToken token;
                    try {
                        // Do *not* replace the current user
                        token = new KerberosToken(principal, keytab);
                    } catch (IOException e) {
                        log.error("Failed to create KerberosToken", e);
                        return status;
                    }
                    ClientContext peerContext = getContextForPeer(localConf, target, principal, token);
                    return _replicate(p, status, target, helper, localConf, peerContext, accumuloUgi);
                }
            });
        } catch (IOException e) {
            // Can't log in, can't replicate
            log.error("Failed to perform local login", e);
            return status;
        }
    } else {
        // Simple case: make a password token, context and then replicate
        PasswordToken token = new PasswordToken(password);
        ClientContext peerContext = getContextForPeer(localConf, target, principal, token);
        return _replicate(p, status, target, helper, localConf, peerContext, null);
    }
}

From source file: org.apache.ambari.view.slider.SliderAppsViewControllerImpl.java

License: Apache License

private <T> T invokeHDFSClientRunnable(final HDFSClientRunnable<T> runnable,
        final Map<String, String> hadoopConfigs) throws IOException, InterruptedException {
    ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    try {
        boolean securityEnabled = Boolean.valueOf(hadoopConfigs.get("security_enabled"));
        final HdfsConfiguration hdfsConfiguration = new HdfsConfiguration();
        for (Entry<String, String> entry : hadoopConfigs.entrySet()) {
            hdfsConfiguration.set(entry.getKey(), entry.getValue());
        }
        UserGroupInformation.setConfiguration(hdfsConfiguration);
        UserGroupInformation sliderUser;
        String loggedInUser = getUserToRunAs(hadoopConfigs);
        if (securityEnabled) {
            String viewPrincipal = getViewParameterValue(PARAM_VIEW_PRINCIPAL);
            String viewPrincipalKeytab = getViewParameterValue(PARAM_VIEW_PRINCIPAL_KEYTAB);
            UserGroupInformation ambariUser = UserGroupInformation
                    .loginUserFromKeytabAndReturnUGI(viewPrincipal, viewPrincipalKeytab);
            if (loggedInUser.equals(ambariUser.getShortUserName())) {
                // HDFS throws an exception when the caller tries to impersonate themselves.
                // User: admin@EXAMPLE.COM is not allowed to impersonate admin
                sliderUser = ambariUser;
            } else {
                sliderUser = UserGroupInformation.createProxyUser(loggedInUser, ambariUser);
            }
        } else {
            sliderUser = UserGroupInformation.getBestUGI(null, loggedInUser);
        }
        try {
            T value = sliderUser.doAs(new PrivilegedExceptionAction<T>() {
                @Override
                public T run() throws Exception {
                    String fsPath = hadoopConfigs.get("fs.defaultFS");
                    FileSystem fs = FileSystem.get(URI.create(fsPath), hdfsConfiguration);
                    try {
                        return runnable.run(fs);
                    } finally {
                        fs.close();
                    }
                }
            });
            return value;
        } catch (UndeclaredThrowableException e) {
            throw e;
        }
    } finally {
        Thread.currentThread().setContextClassLoader(currentClassLoader);
    }
}

From source file: org.apache.ambari.view.slider.SliderAppsViewControllerImpl.java

License: Apache License

private <T> T invokeSliderClientRunnable(final SliderClientContextRunnable<T> runnable)
        throws IOException, InterruptedException, YarnException {
    ClassLoader currentClassLoader = Thread.currentThread().getContextClassLoader();
    Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
    try {
        boolean securityEnabled = Boolean.valueOf(getHadoopConfigs().get("security_enabled"));
        UserGroupInformation.setConfiguration(getSliderClientConfiguration());
        UserGroupInformation sliderUser;
        String loggedInUser = getUserToRunAs();
        if (securityEnabled) {
            String viewPrincipal = getViewParameterValue(PARAM_VIEW_PRINCIPAL);
            String viewPrincipalKeytab = getViewParameterValue(PARAM_VIEW_PRINCIPAL_KEYTAB);
            UserGroupInformation ambariUser = UserGroupInformation
                    .loginUserFromKeytabAndReturnUGI(viewPrincipal, viewPrincipalKeytab);
            if (loggedInUser.equals(ambariUser.getShortUserName())) {
                // HDFS throws an exception when the caller tries to impersonate themselves.
                // User: admin@EXAMPLE.COM is not allowed to impersonate admin
                sliderUser = ambariUser;
            } else {
                sliderUser = UserGroupInformation.createProxyUser(loggedInUser, ambariUser);
            }
        } else {
            sliderUser = UserGroupInformation.getBestUGI(null, loggedInUser);
        }
        try {
            T value = sliderUser.doAs(new PrivilegedExceptionAction<T>() {
                @Override
                public T run() throws Exception {
                    final SliderClient sliderClient = createSliderClient();
                    try {
                        return runnable.run(sliderClient);
                    } finally {
                        destroySliderClient(sliderClient);
                    }
                }
            });
            return value;
        } catch (UndeclaredThrowableException e) {
            Throwable cause = e.getCause();
            if (cause instanceof YarnException) {
                YarnException ye = (YarnException) cause;
                throw ye;
            }
            throw e;
        }
    } finally {
        Thread.currentThread().setContextClassLoader(currentClassLoader);
    }
}

From source file: org.apache.apex.engine.security.TokenRenewer.java

License: Apache License

private long renewTokens(final boolean refresh, boolean checkOnly) throws IOException {
    logger.info("{}", checkOnly ? "Checking renewal" : (refresh ? "Refreshing tokens" : "Renewing tokens"));
    long expiryTime = System.currentTimeMillis() + (refresh ? tokenLifeTime : tokenRenewalInterval);

    final String tokenRenewer = UserGroupInformation.getCurrentUser().getUserName();
    logger.debug("Token renewer {}", tokenRenewer);

    File keyTabFile = null;
    try (FileSystem fs = FileSystem.newInstance(conf)) {
        String destinationDir = FileUtils.getTempDirectoryPath();
        keyTabFile = FSUtil.copyToLocalFileSystem(fs, destinationDir, destinationFile, hdfsKeyTabFile, conf);

        if (principal == null) {
            //principal = UserGroupInformation.getCurrentUser().getUserName();
            principal = UserGroupInformation.getLoginUser().getUserName();
        }
        logger.debug("Principal {}", principal);
        UserGroupInformation ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(principal,
                keyTabFile.getAbsolutePath());
        if (!checkOnly) {
            try {
                UserGroupInformation currUGI = UserGroupInformation.createProxyUser(tokenRenewer, ugi);
                currUGI.doAs(new PrivilegedExceptionAction<Object>() {
                    @Override
                    public Object run() throws Exception {

                        if (refresh) {
                            Credentials creds = new Credentials();
                            try (FileSystem fs1 = FileSystem.newInstance(conf)) {
                                logger.info("Refreshing fs tokens");
                                fs1.addDelegationTokens(tokenRenewer, creds);
                                logger.info("Refreshed tokens");
                            }
                            if (renewRMToken) {
                                try (YarnClient yarnClient = StramClientUtils.createYarnClient(conf)) {
                                    logger.info("Refreshing rm tokens");
                                    new StramClientUtils.ClientRMHelper(yarnClient, conf)
                                            .addRMDelegationToken(tokenRenewer, creds);
                                    logger.info("Refreshed tokens");
                                }
                            }
                            credentials.addAll(creds);
                        } else {
                            Collection<Token<? extends TokenIdentifier>> tokens = credentials.getAllTokens();
                            for (Token<? extends TokenIdentifier> token : tokens) {
                                logger.debug("Token {}", token);
                                if (token.getKind().equals(HDFS_TOKEN_KIND) || (renewRMToken
                                        && token.getKind().equals(RMDelegationTokenIdentifier.KIND_NAME))) {
                                    logger.info("Renewing token {}", token.getKind());
                                    token.renew(conf);
                                    logger.info("Renewed token");
                                }
                            }
                        }

                        return null;
                    }
                });
                UserGroupInformation.getCurrentUser().addCredentials(credentials);
            } catch (InterruptedException e) {
                logger.error("Error while renewing tokens ", e);
                expiryTime = System.currentTimeMillis();
            } catch (IOException e) {
                logger.error("Error while renewing tokens ", e);
                expiryTime = System.currentTimeMillis();
            }
        }
        if (logger.isDebugEnabled()) {
            logger.debug("number of tokens: {}", credentials.getAllTokens().size());
            Iterator<Token<?>> iter = credentials.getAllTokens().iterator();
            while (iter.hasNext()) {
                Token<?> token = iter.next();
                logger.debug("updated token: {}", token);
            }
        }
    } finally {
        if (keyTabFile != null) {
            keyTabFile.delete();
        }
    }
    return expiryTime;
}