Example usage for org.apache.hadoop.security UserGroupInformation createRemoteUser

Introduction

On this page you can find example usage of org.apache.hadoop.security.UserGroupInformation.createRemoteUser.

Prototype

@InterfaceAudience.Public
@InterfaceStability.Evolving
public static UserGroupInformation createRemoteUser(String user) 

Document

Create a user from a login name.
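
Before the real-world examples, here is a minimal, self-contained sketch of the pattern they all share: build a UserGroupInformation from a bare login name with createRemoteUser, then execute an action as that user via doAs. The user name "alice" and the filesystem check inside run() are illustrative placeholders, not taken from any of the examples below.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;

public class CreateRemoteUserSketch {
    public static void main(String[] args) throws Exception {
        // Build a UGI for the login name "alice"; it carries no credentials,
        // so it is only honored where simple authentication is accepted.
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser("alice");

        // Run the enclosed action with "alice" as the effective user.
        boolean exists = ugi.doAs(new PrivilegedExceptionAction<Boolean>() {
            @Override
            public Boolean run() throws Exception {
                FileSystem fs = FileSystem.get(new Configuration());
                return fs.exists(new Path("/user/alice"));
            }
        });
        System.out.println("/user/alice exists: " + exists);
    }
}

Note that a UGI created this way carries no credentials; on a secure (Kerberos) cluster the caller typically attaches a delegation or service token with addToken before invoking doAs, as several of the examples below demonstrate.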

Usage

From source file: org.apache.sentry.tests.e2e.metastore.AbstractMetastoreTestWithStaticConfiguration.java

License: Apache License

public void execPigLatin(String userName, final PigServer pigServer, final String pigLatin) throws Exception {
    UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser(userName);
    clientUgi.doAs(new PrivilegedExceptionAction<Void>() {
        @Override
        public Void run() throws Exception {
            pigServer.registerQuery(pigLatin);
            return null;
        }
    });
}

From source file: org.apache.sentry.tests.e2e.metastore.TestMetastoreEndToEnd.java

License: Apache License

@Test
public void testListDatabases() throws Exception {
    List<String> dbNames;
    HashSet<String> allDatabaseNames = new HashSet<>(Arrays.asList("default", dbName, dbName2));

    // Create databases and verify the admin can list the database names
    final HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
    dropAllMetastoreDBIfExists(client, false);
    createMetastoreDB(client, dbName);
    createMetastoreDB(client, dbName2);
    UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser(ADMIN1);
    dbNames = clientUgi.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client.getAllDatabases();
        }
    });
    assertThat(dbNames).isNotNull();
    verifyReturnedList(allDatabaseNames, dbNames, true);
    client.close();

    // Verify a user with ALL privileges on a database can see its name,
    // but cannot see the names of databases it has no privileges on
    // USER1_1 has ALL at dbName
    final HiveMetaStoreClient client_USER1_1 = context.getMetaStoreClient(USER1_1);
    UserGroupInformation clientUgi_USER1_1 = UserGroupInformation.createRemoteUser(USER1_1);
    dbNames = clientUgi_USER1_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER1_1.getAllDatabases();
        }
    });
    assertThat(dbNames).isNotNull();
    verifyReturnedList(new HashSet<>(Arrays.asList("default", dbName)), dbNames, true);
    client_USER1_1.close();

    // USER2_1 has SELECT at dbName
    final HiveMetaStoreClient client_USER2_1 = context.getMetaStoreClient(USER2_1);
    UserGroupInformation clientUgi_USER2_1 = UserGroupInformation.createRemoteUser(USER2_1);
    dbNames = clientUgi_USER2_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER2_1.getAllDatabases();
        }
    });
    assertThat(dbNames).isNotNull();
    verifyReturnedList(new HashSet<>(Arrays.asList("default", dbName)), dbNames, true);
    //assertThat(dbNames.get(0)).isEqualToIgnoringCase(dbName);
    client_USER2_1.close();

    // USER3_1 has SELECT at dbName.tabName1 and dbName.tabName2
    final HiveMetaStoreClient client_USER3_1 = context.getMetaStoreClient(USER3_1);
    UserGroupInformation clientUgi_USER3_1 = UserGroupInformation.createRemoteUser(USER3_1);
    dbNames = clientUgi_USER3_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER3_1.getAllDatabases();
        }
    });
    assertThat(dbNames).isNotNull();
    verifyReturnedList(new HashSet<>(Arrays.asList("default", dbName)), dbNames, true);
    client_USER3_1.close();

    // USER4_1 has ALL at dbName.tabName1 and dbName2
    final HiveMetaStoreClient client_USER4_1 = context.getMetaStoreClient(USER4_1);
    UserGroupInformation clientUgi_USER4_1 = UserGroupInformation.createRemoteUser(USER4_1);
    dbNames = clientUgi_USER4_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER4_1.getAllDatabases();
        }
    });
    assertThat(dbNames).isNotNull();
    verifyReturnedList(allDatabaseNames, dbNames, true);
    client_USER4_1.close();

    // USER5_1 has CREATE at server
    final HiveMetaStoreClient client_USER5_1 = context.getMetaStoreClient(USER5_1);
    UserGroupInformation clientUgi_USER5_1 = UserGroupInformation.createRemoteUser(USER5_1);
    dbNames = clientUgi_USER5_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER5_1.getAllDatabases();
        }
    });
    assertThat(dbNames).isNotNull();
    verifyReturnedList(allDatabaseNames, dbNames, true);
    client_USER5_1.close();
}

From source file: org.apache.sentry.tests.e2e.metastore.TestMetastoreEndToEnd.java

License: Apache License

@Test
public void testListTables() throws Exception {
    List<String> tableNames;
    HashSet<String> expectedTableNames = new HashSet<>(Arrays.asList(tabName1, tabName2));

    // Create databases and tables, and verify the admin can list the table names
    final HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
    dropMetastoreDBIfExists(client, dbName);
    createMetastoreDB(client, dbName);
    createMetastoreTable(client, dbName, tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    createMetastoreTable(client, dbName, tabName2, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    createMetastoreTable(client, dbName, tabName3, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    UserGroupInformation clientUgi = UserGroupInformation.createRemoteUser(ADMIN1);
    tableNames = clientUgi.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);

    dropMetastoreDBIfExists(client, dbName2);
    createMetastoreDB(client, dbName2);
    createMetastoreTable(client, dbName2, tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    createMetastoreTable(client, dbName2, tabName2, Lists.newArrayList(new FieldSchema("col1", "int", "")));
    tableNames = clientUgi.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    client.close();

    // Verify a user with ALL privileges on a database can list its tables,
    // but cannot list tables in a database it has no privileges on
    // USER1_1 has ALL on dbName
    final HiveMetaStoreClient client_USER1_1 = context.getMetaStoreClient(USER1_1);
    UserGroupInformation clientUgi_USER1_1 = UserGroupInformation.createRemoteUser(USER1_1);
    tableNames = clientUgi_USER1_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER1_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);
    tableNames = clientUgi_USER1_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER1_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(0);
    client_USER1_1.close();

    // USER2_1 has SELECT on dbName
    final HiveMetaStoreClient client_USER2_1 = context.getMetaStoreClient(USER2_1);
    UserGroupInformation clientUgi_USER2_1 = UserGroupInformation.createRemoteUser(USER2_1);
    tableNames = clientUgi_USER2_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER2_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);
    tableNames = clientUgi_USER2_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER2_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(0);
    client_USER2_1.close();

    // USER3_1 has SELECT on dbName.tabName1 and dbName.tabName2
    final HiveMetaStoreClient client_USER3_1 = context.getMetaStoreClient(USER3_1);
    UserGroupInformation clientUgi_USER3_1 = UserGroupInformation.createRemoteUser(USER3_1);
    tableNames = clientUgi_USER3_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER3_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    assertThat(expectedTableNames).contains(tableNames.get(0));
    assertThat(expectedTableNames).contains(tableNames.get(1));
    tableNames = clientUgi_USER3_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER3_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(0);
    client_USER3_1.close();

    // USER4_1 has ALL on dbName.tabName1 and dbName2
    final HiveMetaStoreClient client_USER4_1 = context.getMetaStoreClient(USER4_1);
    UserGroupInformation clientUgi_USER4_1 = UserGroupInformation.createRemoteUser(USER4_1);
    tableNames = clientUgi_USER4_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER4_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(1); // only has access to tabName1 in dbName
    assertThat(tableNames.get(0)).isEqualToIgnoringCase(tabName1);
    tableNames = clientUgi_USER4_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER4_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    client_USER4_1.close();

    // USER5_1 has CREATE on server
    final HiveMetaStoreClient client_USER5_1 = context.getMetaStoreClient(USER5_1);
    UserGroupInformation clientUgi_USER5_1 = UserGroupInformation.createRemoteUser(USER5_1);
    tableNames = clientUgi_USER5_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER5_1.getAllTables(dbName);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(3);
    tableNames = clientUgi_USER5_1.doAs(new PrivilegedExceptionAction<List<String>>() {
        @Override
        public List<String> run() throws Exception {
            return client_USER5_1.getAllTables(dbName2);
        }
    });
    assertThat(tableNames).isNotNull();
    assertThat(tableNames.size()).isEqualTo(2);
    client_USER5_1.close();
}

From source file: org.apache.slider.server.appmaster.rpc.RpcBinder.java

License: Apache License

public static SliderClusterProtocol getProxy(final Configuration conf, ApplicationReport application,
        final int rpcTimeout) throws IOException, SliderException, InterruptedException {

    String host = application.getHost();
    int port = application.getRpcPort();
    String address = host + ":" + port;
    if (host == null || 0 == port) {
        throw new SliderException(SliderExitCodes.EXIT_CONNECTIVITY_PROBLEM,
                "Slider instance " + application.getName() + " isn't providing a valid address for the"
                        + " Slider RPC protocol: " + address);
    }

    UserGroupInformation currentUser = UserGroupInformation.getCurrentUser();
    final UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(currentUser.getUserName());
    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(application.getHost(),
            application.getRpcPort());
    SliderClusterProtocol realProxy;

    log.debug("Connecting to {}", serviceAddr);
    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.yarn.api.records.Token clientToAMToken = application.getClientToAMToken();
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        newUgi.addToken(token);
        realProxy = newUgi.doAs(new PrivilegedExceptionAction<SliderClusterProtocol>() {
            @Override
            public SliderClusterProtocol run() throws IOException {
                return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
            }
        });
    } else {
        return connectToServer(serviceAddr, newUgi, conf, rpcTimeout);
    }
    return realProxy;
}

From source file: org.apache.streams.hdfs.WebHdfsPersistReader.java

License: Apache License

private synchronized void connectToWebHDFS() {
    try {
        LOGGER.info("User : {}", this.hdfsConfiguration.getUser());
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(this.hdfsConfiguration.getUser());
        ugi.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE);

        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                Configuration conf = new Configuration();
                conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
                LOGGER.info("WebURI : {}", getURI().toString());
                client = FileSystem.get(getURI(), conf);
                LOGGER.info("Connected to WebHDFS");

                /*
                * ************************************************************************************************
                * This code is an example of how you would work with HDFS if you weren't going over
                * the webHDFS protocol.
                *
                * Smashew: 2013-10-01
                * ************************************************************************************************
                conf.set("fs.defaultFS", "hdfs://hadoop.mdigitallife.com:8020/user/" + userName);
                conf.set("namenode.host","0.0.0.0");
                conf.set("hadoop.job.ugi", userName);
                conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "runner");
                fileSystem.createNewFile(new Path("/user/"+ userName + "/test"));
                FileStatus[] status = fs.listStatus(new Path("/user/" + userName));
                for(int i=0;i<status.length;i++)
                {
                LOGGER.info("Directory: {}", status[i].getPath());
                }
                */
                return null;
            }
        });
    } catch (Exception e) {
        LOGGER.error("There was an error connecting to WebHDFS, please check your settings and try again", e);
    }
}

From source file: org.apache.streams.hdfs.WebHdfsPersistWriter.java

License: Apache License

private synchronized void connectToWebHDFS() {
    try {
        LOGGER.info("User : {}", this.hdfsConfiguration.getUser());
        UserGroupInformation ugi = UserGroupInformation.createRemoteUser(this.hdfsConfiguration.getUser());
        ugi.setAuthenticationMethod(UserGroupInformation.AuthenticationMethod.SIMPLE);

        ugi.doAs(new PrivilegedExceptionAction<Void>() {
            public Void run() throws Exception {
                Configuration conf = new Configuration();
                conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
                LOGGER.info("WebURI : {}", getURI().toString());
                client = FileSystem.get(getURI(), conf);
                LOGGER.info("Connected to WebHDFS");

                /*
                * ************************************************************************************************
                * This code is an example of how you would work with HDFS if you weren't going over
                * the webHDFS protocol.
                *
                * Smashew: 2013-10-01
                * ************************************************************************************************
                conf.set("fs.defaultFS", "hdfs://hadoop.mdigitallife.com:8020/user/" + userName);
                conf.set("namenode.host","0.0.0.0");
                conf.set("hadoop.job.ugi", userName);
                conf.set(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, "runner");
                fileSystem.createNewFile(new Path("/user/"+ userName + "/test"));
                FileStatus[] status = fs.listStatus(new Path("/user/" + userName));
                for(int i=0;i<status.length;i++)
                {
                LOGGER.info("Directory: {}", status[i].getPath());
                }
                */
                return null;
            }
        });
    } catch (Exception e) {
        LOGGER.error("There was an error connecting to WebHDFS, please check your settings and try again", e);
        throw new RuntimeException(e);
    }
}

From source file: org.apache.tajo.master.TaskRunnerLauncherImpl.java

License: Apache License

protected ContainerManager getCMProxy(ContainerId containerID, final String containerManagerBindAddr,
        ContainerToken containerToken) throws IOException {
    String[] hosts = containerManagerBindAddr.split(":");
    final InetSocketAddress cmAddr = new InetSocketAddress(hosts[0], Integer.parseInt(hosts[1]));
    UserGroupInformation user = UserGroupInformation.getCurrentUser();

    if (UserGroupInformation.isSecurityEnabled()) {
        Token<ContainerTokenIdentifier> token = ProtoUtils.convertFromProtoFormat(containerToken, cmAddr);
        // the user passed to createRemoteUser in this context has to be the ContainerId
        user = UserGroupInformation.createRemoteUser(containerID.toString());
        user.addToken(token);
    }

    ContainerManager proxy = user.doAs(new PrivilegedAction<ContainerManager>() {
        @Override
        public ContainerManager run() {
            return (ContainerManager) yarnRPC.getProxy(ContainerManager.class, cmAddr, getConfig());
        }
    });
    return proxy;
}

From source file: org.apache.tajo.master.YarnContainerProxy.java

License: Apache License

protected ContainerManagementProtocol getCMProxy(ContainerId containerID, final String containerManagerBindAddr,
        Token containerToken) throws IOException {
    String[] hosts = containerManagerBindAddr.split(":");
    final InetSocketAddress cmAddr = new InetSocketAddress(hosts[0], Integer.parseInt(hosts[1]));
    UserGroupInformation user = UserGroupInformation.getCurrentUser();

    if (UserGroupInformation.isSecurityEnabled()) {
        org.apache.hadoop.security.token.Token<ContainerTokenIdentifier> token = ConverterUtils
                .convertFromYarn(containerToken, cmAddr);
        // the user passed to createRemoteUser in this context has to be the ContainerId
        user = UserGroupInformation.createRemoteUser(containerID.toString());
        user.addToken(token);
    }

    ContainerManagementProtocol proxy = user.doAs(new PrivilegedAction<ContainerManagementProtocol>() {
        @Override
        public ContainerManagementProtocol run() {
            return (ContainerManagementProtocol) yarnRPC.getProxy(ContainerManagementProtocol.class, cmAddr,
                    conf);
        }
    });

    return proxy;
}

From source file: org.apache.tajo.worker.ExecutionBlockContext.java

License: Apache License

public void init() throws Throwable {

    LOG.info("Tajo Root Dir: " + systemConf.getVar(TajoConf.ConfVars.ROOT_DIR));
    LOG.info("Worker Local Dir: " + systemConf.getVar(TajoConf.ConfVars.WORKER_TEMPORAL_DIR));

    this.qmMasterAddr = NetUtils.createSocketAddr(queryMaster.getHost(), queryMaster.getQueryMasterPort());
    LOG.info("QueryMaster Address:" + qmMasterAddr);

    UserGroupInformation.setConfiguration(systemConf);
    // TODO - 'load credential' should be implemented
    // Getting taskOwner
    UserGroupInformation taskOwner = UserGroupInformation
            .createRemoteUser(systemConf.getVar(TajoConf.ConfVars.USERNAME));

    // initialize DFS and LocalFileSystems
    this.taskOwner = taskOwner;
    this.reporter.startReporter();

    // resource initialization
    try {
        this.resource.initialize(queryContext, plan);
    } catch (Throwable e) {
        try {
            NettyClientBase client = getQueryMasterConnection();
            try {
                QueryMasterProtocol.QueryMasterProtocolService.Interface stub = client.getStub();
                stub.killQuery(null, executionBlockId.getQueryId().getProto(), NullCallback.get());
            } finally {
                connPool.releaseConnection(client);
            }
        } catch (Throwable t) {
            //ignore
        }
        throw e;
    }
}

From source file: org.apache.tez.client.TezClientUtils.java

License: Apache License

@Private
public static DAGClientAMProtocolBlockingPB getAMProxy(final Configuration conf, String amHost, int amRpcPort,
        org.apache.hadoop.yarn.api.records.Token clientToAMToken) throws IOException {

    final InetSocketAddress serviceAddr = NetUtils.createSocketAddrForHost(amHost, amRpcPort);
    UserGroupInformation userUgi = UserGroupInformation
            .createRemoteUser(UserGroupInformation.getCurrentUser().getUserName());
    if (clientToAMToken != null) {
        Token<ClientToAMTokenIdentifier> token = ConverterUtils.convertFromYarn(clientToAMToken, serviceAddr);
        userUgi.addToken(token);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Connecting to Tez AM at " + serviceAddr);
    }
    DAGClientAMProtocolBlockingPB proxy = null;
    try {
        proxy = userUgi.doAs(new PrivilegedExceptionAction<DAGClientAMProtocolBlockingPB>() {
            @Override
            public DAGClientAMProtocolBlockingPB run() throws IOException {
                RPC.setProtocolEngine(conf, DAGClientAMProtocolBlockingPB.class, ProtobufRpcEngine.class);
                return (DAGClientAMProtocolBlockingPB) RPC.getProxy(DAGClientAMProtocolBlockingPB.class, 0,
                        serviceAddr, conf);
            }
        });
    } catch (InterruptedException e) {
        throw new IOException("Failed to connect to AM", e);
    }
    return proxy;
}