List of usage examples for org.apache.thrift.transport TSaslServerTransport.Factory addServerDefinition
public void addServerDefinition(String mechanism, String protocol, String serverName, Map<String, String> props, CallbackHandler cbh)
From source file: alluxio.security.authentication.PlainSaslTransportProvider.java
License: Apache License
/**
 * Builds a server-side {@link TTransportFactory} for the PLAIN SASL mechanism.
 *
 * @param runnable callback run by the server callback handler
 * @return a PLAIN-mechanism SASL server transport factory
 * @throws SaslException if the {@link AuthenticationProvider} cannot be created
 */
@Override
public TTransportFactory getServerTransportFactory(Runnable runnable) throws SaslException {
  // Resolve the configured authentication type and its matching provider.
  AuthType authType = Configuration.getEnum(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.class);
  AuthenticationProvider provider = AuthenticationProvider.Factory.create(authType);

  // Register PLAIN; the protocol and serverName arguments are unused by this
  // mechanism, hence both are null.
  TSaslServerTransport.Factory transportFactory = new TSaslServerTransport.Factory();
  transportFactory.addServerDefinition(PlainSaslServerProvider.MECHANISM, null, null,
      new HashMap<String, String>(),
      new PlainSaslServerCallbackHandler(provider, runnable));
  return transportFactory;
}
From source file: alluxio.security.authentication.PlainSaslUtils.java
License: Apache License
/**
 * For the server side, builds a PLAIN mechanism {@link TTransportFactory}. A
 * callback handler is hooked in for the configured authentication method.
 *
 * @param authType the authentication type
 * @param conf the {@link Configuration}
 * @return a corresponding TTransportFactory using the PLAIN mechanism
 * @throws SaslException if an {@link AuthenticationProvider} is not found
 */
public static TTransportFactory getPlainServerTransportFactory(AuthType authType, Configuration conf)
    throws SaslException {
  AuthenticationProvider provider = AuthenticationProvider.Factory.create(authType, conf);

  // PLAIN ignores the protocol and serverName arguments, so both are null here.
  TSaslServerTransport.Factory factory = new TSaslServerTransport.Factory();
  factory.addServerDefinition(PlainSaslServerProvider.MECHANISM, null, null,
      new HashMap<String, String>(),
      new PlainSaslServer.PlainServerCallbackHandler(provider));
  return factory;
}
From source file: backtype.storm.security.auth.digest.DigestSaslTransportPlugin.java
License: Apache License
protected TTransportFactory getServerTransportFactory() throws IOException { //create an authentication callback handler CallbackHandler serer_callback_handler = new ServerCallbackHandler(login_conf); //create a transport factory that will invoke our auth callback for digest TSaslServerTransport.Factory factory = new TSaslServerTransport.Factory(); factory.addServerDefinition(DIGEST, AuthUtils.SERVICE, "localhost", null, serer_callback_handler); LOG.info("SASL DIGEST-MD5 transport factory will be used"); return factory; }
From source file: backtype.storm.security.auth.kerberos.KerberosSaslTransportPlugin.java
License: Apache License
public TTransportFactory getServerTransportFactory() throws IOException { //create an authentication callback handler CallbackHandler server_callback_handler = new ServerCallbackHandler(login_conf); //login our principal Subject subject = null;//w w w . ja v a 2 s .c om try { //specify a configuration object to be used Configuration.setConfiguration(login_conf); //now login Login login = new Login(AuthUtils.LOGIN_CONTEXT_SERVER, server_callback_handler); subject = login.getSubject(); } catch (LoginException ex) { LOG.error("Server failed to login in principal:" + ex, ex); throw new RuntimeException(ex); } //check the credential of our principal if (subject.getPrivateCredentials(KerberosTicket.class).isEmpty()) { throw new RuntimeException("Fail to verify user principal with section \"" + AuthUtils.LOGIN_CONTEXT_SERVER + "\" in login configuration file " + login_conf); } String principal = AuthUtils.get(login_conf, AuthUtils.LOGIN_CONTEXT_SERVER, "principal"); LOG.debug("principal:" + principal); KerberosName serviceKerberosName = new KerberosName(principal); String serviceName = serviceKerberosName.getServiceName(); String hostName = serviceKerberosName.getHostName(); Map<String, String> props = new TreeMap<String, String>(); props.put(Sasl.QOP, "auth"); props.put(Sasl.SERVER_AUTH, "false"); //create a transport factory that will invoke our auth callback for digest TSaslServerTransport.Factory factory = new TSaslServerTransport.Factory(); factory.addServerDefinition(KERBEROS, serviceName, hostName, props, server_callback_handler); //create a wrap transport factory so that we could apply user credential during connections TUGIAssumingTransportFactory wrapFactory = new TUGIAssumingTransportFactory(factory, subject); LOG.info("SASL GSSAPI transport factory will be used"); return wrapFactory; }
From source file: com.cloudera.llama.server.ThriftEndPoint.java
License: Apache License
public static TTransportFactory createTTransportFactory(ServerConfiguration conf) { TTransportFactory factory;// w ww . ja va2s .com if (Security.isSecure(conf)) { Map<String, String> saslProperties = new HashMap<String, String>(); saslProperties.put(Sasl.QOP, conf.getThriftQOP()); String principal = conf.getServerPrincipalName(); String name = extractPrincipalName(principal); String host = extractPrincipalHost(principal); if (host == null) { throw new IllegalArgumentException( FastFormat.format("Kerberos principal '{}' must have a hostname part", principal)); } TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory(); saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties, new GssCallback()); factory = saslFactory; } else { factory = new TTransportFactory(); } return factory; }
From source file: joshelser.Server.java
License: Apache License
public static void main(String[] args) throws Exception { Opts opts = new Opts(); opts.parseArgs(Server.class, args); Configuration conf = new Configuration(); FileSystem fs = FileSystem.get(conf); // Parse out the primary/instance@DOMAIN from the principal String principal = SecurityUtil.getServerPrincipal(opts.principal, InetAddress.getLocalHost().getCanonicalHostName()); HadoopKerberosName name = new HadoopKerberosName(principal); String primary = name.getServiceName(); String instance = name.getHostName(); // Log in using the keytab UserGroupInformation.loginUserFromKeytab(principal, opts.keytab); log.info("principal: {}", principal); log.info("name: {}", name); log.info("instance: {}", instance); log.info("primary: {}", primary); log.info("instance: {}", instance); // Get the info from our login UserGroupInformation serverUser = UserGroupInformation.getLoginUser(); log.info("Current user: {}", serverUser); // Open the server using the provide dport TServerSocket serverTransport = new TServerSocket(opts.port); // Wrap our implementation with the interface's processor HdfsService.Processor<Iface> processor = new HdfsService.Processor<Iface>(new HdfsServiceImpl(fs)); // Use authorization and confidentiality Map<String, String> saslProperties = new HashMap<String, String>(); saslProperties.put(Sasl.QOP, "auth-conf"); // Creating the server definition TSaslServerTransport.Factory saslTransportFactory = new TSaslServerTransport.Factory(); saslTransportFactory.addServerDefinition("GSSAPI", // tell SASL to use GSSAPI, which supports Kerberos primary, // kerberos primary for server - "myprincipal" in myprincipal/my.server.com@MY.REALM instance, // kerberos instance for server - "my.server.com" in myprincipal/my.server.com@MY.REALM saslProperties, // Properties set, above new SaslRpcServer.SaslGssCallbackHandler()); // Ensures that authenticated user is the same as the authorized user // Make sure the TTransportFactory is performing a UGI.doAs TTransportFactory 
ugiTransportFactory = new TUGIAssumingTransportFactory(saslTransportFactory, serverUser); // Processor which takes the UGI for the RPC call, proxy that user on the server login, and then run as the proxied user TUGIAssumingProcessor ugiProcessor = new TUGIAssumingProcessor(processor); // Make a simple TTheadPoolServer with the processor and transport factory TServer server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport) .transportFactory(ugiTransportFactory).processor(ugiProcessor)); // Start the thrift server server.serve();// w w w.ja v a 2s . c o m }
From source file: org.apache.accumulo.server.rpc.TServerUtils.java
License: Apache License
public static ServerAddress createSaslThreadPoolServer(HostAndPort address, TProcessor processor, TProtocolFactory protocolFactory, long socketTimeout, SaslServerConnectionParams params, final String serverName, String threadName, final int numThreads, final int numSTThreads, long timeBetweenThreadChecks) throws TTransportException { // We'd really prefer to use THsHaServer (or similar) to avoid 1 RPC == 1 Thread that the TThreadPoolServer does, // but sadly this isn't the case. Because TSaslTransport needs to issue a handshake when it open()'s which will fail // when the server does an accept() to (presumably) wake up the eventing system. log.info("Creating SASL thread pool thrift server on listening on {}:{}", address.getHostText(), address.getPort());//from w w w .ja v a 2 s . c o m TServerSocket transport = new TServerSocket(address.getPort(), (int) socketTimeout); String hostname, fqdn; try { hostname = InetAddress.getByName(address.getHostText()).getCanonicalHostName(); fqdn = InetAddress.getLocalHost().getCanonicalHostName(); } catch (UnknownHostException e) { throw new TTransportException(e); } // If we can't get a real hostname from the provided host test, use the hostname from DNS for localhost if ("0.0.0.0".equals(hostname)) { hostname = fqdn; } // ACCUMULO-3497 an easy sanity check we can perform for the user when SASL is enabled. Clients and servers have to agree upon the FQDN // so that the SASL handshake can occur. If the provided hostname doesn't match the FQDN for this host, fail quickly and inform them to update // their configuration. if (!hostname.equals(fqdn)) { log.error( "Expected hostname of '{}' but got '{}'. Ensure the entries in the Accumulo hosts files (e.g. 
masters, slaves) are the FQDN for each host when using SASL.", fqdn, hostname); throw new RuntimeException( "SASL requires that the address the thrift server listens on is the same as the FQDN for this host"); } final UserGroupInformation serverUser; try { serverUser = UserGroupInformation.getLoginUser(); } catch (IOException e) { throw new TTransportException(e); } log.debug("Logged in as {}, creating TSaslServerTransport factory with {}/{}", serverUser, params.getKerberosServerPrimary(), hostname); // Make the SASL transport factory with the instance and primary from the kerberos server principal, SASL properties // and the SASL callback handler from Hadoop to ensure authorization ID is the authentication ID. Despite the 'protocol' argument seeming to be useless, it // *must* be the primary of the server. TSaslServerTransport.Factory saslTransportFactory = new TSaslServerTransport.Factory(); saslTransportFactory.addServerDefinition(ThriftUtil.GSSAPI, params.getKerberosServerPrimary(), hostname, params.getSaslProperties(), new SaslRpcServer.SaslGssCallbackHandler()); if (null != params.getSecretManager()) { log.info("Adding DIGEST-MD5 server definition for delegation tokens"); saslTransportFactory.addServerDefinition(ThriftUtil.DIGEST_MD5, params.getKerberosServerPrimary(), hostname, params.getSaslProperties(), new SaslServerDigestCallbackHandler(params.getSecretManager())); } else { log.info("SecretManager is null, not adding support for delegation token authentication"); } // Make sure the TTransportFactory is performing a UGI.doAs TTransportFactory ugiTransportFactory = new UGIAssumingTransportFactory(saslTransportFactory, serverUser); if (address.getPort() == 0) { // If we chose a port dynamically, make a new use it (along with the proper hostname) address = HostAndPort.fromParts(address.getHostText(), transport.getServerSocket().getLocalPort()); log.info("SASL thrift server bound on {}", address); } ThreadPoolExecutor pool = 
createSelfResizingThreadPool(serverName, numThreads, numSTThreads, timeBetweenThreadChecks); final TThreadPoolServer server = createTThreadPoolServer(transport, processor, ugiTransportFactory, protocolFactory, pool); return new ServerAddress(server, address); }
From source file: org.apache.flume.source.ThriftSource.java
License: Apache License
private TTransportFactory getSASLTransportFactory() { String[] names;//from ww w. j ava 2 s. c o m try { names = FlumeAuthenticationUtil.splitKerberosName(principal); } catch (IOException e) { throw new FlumeException("Error while trying to resolve Principal name - " + principal, e); } Map<String, String> saslProperties = new HashMap<String, String>(); saslProperties.put(Sasl.QOP, "auth"); TSaslServerTransport.Factory saslTransportFactory = new TSaslServerTransport.Factory(); saslTransportFactory.addServerDefinition("GSSAPI", names[0], names[1], saslProperties, FlumeAuthenticationUtil.getSaslGssCallbackHandler()); return saslTransportFactory; }
From source file: org.apache.hadoop.hbase.thrift.ThriftServerRunner.java
License: Apache License
/**
 * Setting up the thrift TServer: chooses the protocol factory, transport
 * factory (framed / plain / GSSAPI SASL), and server implementation from the
 * configuration, then assigns the constructed server to {@code tserver}.
 *
 * @throws Exception if the configuration is unsupported or server creation fails
 */
private void setupServer() throws Exception {
  // Construct correct ProtocolFactory.
  TProtocolFactory protocolFactory;
  if (conf.getBoolean(COMPACT_CONF_KEY, false)) {
    LOG.debug("Using compact protocol");
    protocolFactory = new TCompactProtocol.Factory();
  } else {
    LOG.debug("Using binary protocol");
    protocolFactory = new TBinaryProtocol.Factory();
  }

  final TProcessor p = new Hbase.Processor<Hbase.Iface>(handler);
  ImplType implType = ImplType.getServerImpl(conf);
  // May be replaced below by a SASL-aware wrapper around p.
  TProcessor processor = p;

  // Construct correct TransportFactory.
  TTransportFactory transportFactory;
  if (conf.getBoolean(FRAMED_CONF_KEY, false) || implType.isAlwaysFramed) {
    // Framing and SASL authentication are mutually exclusive.
    if (qop != null) {
      throw new RuntimeException(
          "Thrift server authentication" + " doesn't work with framed transport yet");
    }
    transportFactory = new TFramedTransport.Factory(conf.getInt(MAX_FRAME_SIZE_CONF_KEY, 2) * 1024 * 1024);
    LOG.debug("Using framed transport");
  } else if (qop == null) {
    // No security requested: plain pass-through transport.
    transportFactory = new TTransportFactory();
  } else {
    // Extract the name from the principal.
    String name = SecurityUtil.getUserFromPrincipal(conf.get("hbase.thrift.kerberos.principal"));
    Map<String, String> saslProperties = new HashMap<String, String>();
    saslProperties.put(Sasl.QOP, qop);
    TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory();
    // GSSAPI definition with a handler that rejects mismatched
    // authentication/authorization IDs and shortens the authorized ID to a
    // plain user name.
    saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties,
        new SaslGssCallbackHandler() {
          @Override
          public void handle(Callback[] callbacks) throws UnsupportedCallbackException {
            AuthorizeCallback ac = null;
            for (Callback callback : callbacks) {
              if (callback instanceof AuthorizeCallback) {
                ac = (AuthorizeCallback) callback;
              } else {
                throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback");
              }
            }
            if (ac != null) {
              String authid = ac.getAuthenticationID();
              String authzid = ac.getAuthorizationID();
              if (!authid.equals(authzid)) {
                ac.setAuthorized(false);
              } else {
                ac.setAuthorized(true);
                String userName = SecurityUtil.getUserFromPrincipal(authzid);
                LOG.info("Effective user: " + userName);
                ac.setAuthorizedID(userName);
              }
            }
          }
        });
    transportFactory = saslFactory;

    // Create a processor wrapper, to get the caller: pull the authorized
    // principal out of the SASL transport and record it as the effective user
    // before delegating to the real processor.
    processor = new TProcessor() {
      @Override
      public boolean process(TProtocol inProt, TProtocol outProt) throws TException {
        TSaslServerTransport saslServerTransport = (TSaslServerTransport) inProt.getTransport();
        SaslServer saslServer = saslServerTransport.getSaslServer();
        String principal = saslServer.getAuthorizationID();
        hbaseHandler.setEffectiveUser(principal);
        return p.process(inProt, outProt);
      }
    };
  }

  // Nonblocking server types cannot bind to a specific IP.
  if (conf.get(BIND_CONF_KEY) != null && !implType.canSpecifyBindIP) {
    LOG.error("Server types " + Joiner.on(", ").join(ImplType.serversThatCannotSpecifyBindIP())
        + " don't support IP " + "address binding at the moment. See "
        + "https://issues.apache.org/jira/browse/HBASE-2155 for details.");
    throw new RuntimeException("-" + BIND_CONF_KEY + " not supported with " + implType);
  }

  if (implType == ImplType.HS_HA || implType == ImplType.NONBLOCKING
      || implType == ImplType.THREADED_SELECTOR) {
    InetAddress listenAddress = getBindAddress(conf);
    TNonblockingServerTransport serverTransport =
        new TNonblockingServerSocket(new InetSocketAddress(listenAddress, listenPort));
    if (implType == ImplType.NONBLOCKING) {
      TNonblockingServer.Args serverArgs = new TNonblockingServer.Args(serverTransport);
      serverArgs.processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
      tserver = new TNonblockingServer(serverArgs);
    } else if (implType == ImplType.HS_HA) {
      THsHaServer.Args serverArgs = new THsHaServer.Args(serverTransport);
      CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<Call>(), metrics);
      ExecutorService executorService = createExecutor(callQueue, serverArgs.getWorkerThreads());
      serverArgs.executorService(executorService).processor(processor)
          .transportFactory(transportFactory).protocolFactory(protocolFactory);
      tserver = new THsHaServer(serverArgs);
    } else { // THREADED_SELECTOR
      TThreadedSelectorServer.Args serverArgs = new HThreadedSelectorServerArgs(serverTransport, conf);
      CallQueue callQueue = new CallQueue(new LinkedBlockingQueue<Call>(), metrics);
      ExecutorService executorService = createExecutor(callQueue, serverArgs.getWorkerThreads());
      serverArgs.executorService(executorService).processor(processor)
          .transportFactory(transportFactory).protocolFactory(protocolFactory);
      tserver = new TThreadedSelectorServer(serverArgs);
    }
    LOG.info("starting HBase " + implType.simpleClassName() + " server on "
        + Integer.toString(listenPort));
  } else if (implType == ImplType.THREAD_POOL) {
    // Thread pool server. Get the IP address to bind to.
    InetAddress listenAddress = getBindAddress(conf);
    TServerTransport serverTransport = new TServerSocket(new InetSocketAddress(listenAddress, listenPort));
    TBoundedThreadPoolServer.Args serverArgs = new TBoundedThreadPoolServer.Args(serverTransport, conf);
    serverArgs.processor(processor).transportFactory(transportFactory).protocolFactory(protocolFactory);
    LOG.info("starting " + ImplType.THREAD_POOL.simpleClassName() + " on " + listenAddress + ":"
        + Integer.toString(listenPort) + "; " + serverArgs);
    TBoundedThreadPoolServer tserver = new TBoundedThreadPoolServer(serverArgs, metrics);
    this.tserver = tserver;
  } else {
    throw new AssertionError("Unsupported Thrift server implementation: " + implType.simpleClassName());
  }

  // A sanity check that we instantiated the right type of server.
  if (tserver.getClass() != implType.serverClass) {
    throw new AssertionError("Expected to create Thrift server class " + implType.serverClass.getName()
        + " but got " + tserver.getClass().getName());
  }

  registerFilters(conf);
}
From source file: org.apache.hadoop.hbase.thrift2.ThriftServer.java
License: Apache License
private static TTransportFactory getTTransportFactory(String qop, String name, String host, boolean framed, int frameSize) { if (framed) { if (qop != null) { throw new RuntimeException( "Thrift server authentication" + " doesn't work with framed transport yet"); }//from w w w . ja v a 2 s . co m log.debug("Using framed transport"); return new TFramedTransport.Factory(frameSize); } else if (qop == null) { return new TTransportFactory(); } else { Map<String, String> saslProperties = new HashMap<String, String>(); saslProperties.put(Sasl.QOP, qop); TSaslServerTransport.Factory saslFactory = new TSaslServerTransport.Factory(); saslFactory.addServerDefinition("GSSAPI", name, host, saslProperties, new SaslGssCallbackHandler() { @Override public void handle(Callback[] callbacks) throws UnsupportedCallbackException { AuthorizeCallback ac = null; for (Callback callback : callbacks) { if (callback instanceof AuthorizeCallback) { ac = (AuthorizeCallback) callback; } else { throw new UnsupportedCallbackException(callback, "Unrecognized SASL GSSAPI Callback"); } } if (ac != null) { String authid = ac.getAuthenticationID(); String authzid = ac.getAuthorizationID(); if (!authid.equals(authzid)) { ac.setAuthorized(false); } else { ac.setAuthorized(true); String userName = SecurityUtil.getUserFromPrincipal(authzid); log.info("Effective user: " + userName); ac.setAuthorizedID(userName); } } } }); return saslFactory; } }