List of usage examples for org.apache.thrift.server.TThreadedSelectorServer.Args#selectorThreads
Sets the number of selector threads.
To view the source code for org.apache.thrift.server.TThreadedSelectorServer.Args#selectorThreads, click the Source Link.
From source file:ch.epfl.eagle.daemon.util.TServers.java
License:Apache License
/** * Launch a multi-threaded Thrift server with the given {@code processor}. Note that * internally this creates an expanding thread pool of at most {@code threads} threads, * and requests are queued whenever that thread pool is saturated. *///w w w . j a v a2 s . c om public static void launchThreadedThriftServer(int port, int threads, TProcessor processor) throws IOException { LOG.info("Staring async thrift server of type: " + processor.getClass().toString() + " on port " + port); TNonblockingServerTransport serverTransport; try { serverTransport = new TNonblockingServerSocket(port); } catch (TTransportException e) { throw new IOException(e); } TThreadedSelectorServer.Args serverArgs = new TThreadedSelectorServer.Args(serverTransport); serverArgs.transportFactory(new TFramedTransport.Factory()); serverArgs.protocolFactory(new TBinaryProtocol.Factory()); serverArgs.processor(processor); serverArgs.selectorThreads(SELECTOR_THREADS); serverArgs.workerThreads(threads); TServer server = new TThreadedSelectorServer(serverArgs); new Thread(new TServerRunnable(server)).start(); }
From source file:com.liveramp.hank.partition_server.PartitionServer.java
License:Apache License
protected void startThriftServer() throws TTransportException, IOException, InterruptedException { IfaceWithShutdown handler = null;// ww w . ja v a 2s.co m try { // Set up the service handler handler = getHandler(); // Launch the thrift server TNonblockingServerSocket serverSocket = new TNonblockingServerSocket(configurator.getServicePort()); TThreadedSelectorServer.Args options = new TThreadedSelectorServer.Args(serverSocket); options.processor(new com.liveramp.hank.generated.PartitionServer.Processor(handler)); options.workerThreads(configurator.getNumConcurrentQueries()); options.selectorThreads(4); options.protocolFactory(new TCompactProtocol.Factory()); options.maxReadBufferBytes = MAX_BUFFER_SIZE; dataServer = new TThreadedSelectorServer(options); LOG.info("Launching Thrift server."); dataServer.serve(); LOG.info("Thrift server exited."); // The Thrift server does not clean up selectors after stopping, which leads to a file descriptor leak. // See https://issues.apache.org/jira/browse/THRIFT-2274 // TODO: when the bug is fixed in Thrift, remove this ugly hack which takes care of the issue List<Selector> selectors = getServerSelectors(dataServer); closeServerSelectors(selectors); // Close the socket serverSocket.close(); } finally { // Always shut down the handler if (handler != null) { LOG.debug("Shutting down Partition Server handler."); handler.shutDown(); } } }
From source file:de.hzi.helmholtz.ThriftService.ThriftServer.java
public void init(String portNo) throws InterruptedException, TTransportException { try {/*ww w .j a v a 2 s . c o m*/ if (portNo.equals("")) { System.out.println("Port number is empty"); return; } System.out.println("Starting server on port " + portNo + " ..."); //TSSLTransportFactory.TSSLTransportParameters params = new TSSLTransportFactory.TSSLTransportParameters(); //params.setKeyStore(Properties.THIFT_SSC_PATH, Properties.THRIFT_SSC_PW); ThriftServiceHandler handler = new ThriftServiceHandler(); BiosynThriftService.Processor<ThriftServiceHandler> processor = new BiosynThriftService.Processor<ThriftServiceHandler>( handler); //TServerSocket serverTransport = TSSLTransportFactory.getServerSocket( Integer.valueOf(portNo),1000000000, InetAddress.getByName(Properties.MXBASE_SERVER), params); //TServerTransport serverTransport = new TServerSocket(Integer.valueOf(portNo)); TNonblockingServerTransport trans = new TNonblockingServerSocket(Integer.valueOf(portNo)); //TNonblockingServerTransport serverTransport = new TNonblockingServerSocket(Integer.valueOf(portNo)); TThreadedSelectorServer.Args args = new TThreadedSelectorServer.Args(trans); args.transportFactory(new TFramedTransport.Factory()); args.protocolFactory(new TCompactProtocol.Factory());// TBinaryProtocol.Factory()); args.processor(processor); args.selectorThreads(4); args.workerThreads(32); server = new TThreadedSelectorServer(args); //server = new TNonblockingServer(new TNonblockingServer.Args(serverTransport).processor(processor)); //server = new TThreadPoolServer(new TThreadPoolServer.Args(serverTransport).processor(processor)); server.serve(); //new Thread(this).start(); //while (!server.isServing()) { // Thread.sleep(1); //}; } catch (Exception ex) { System.out.println(ex.toString()); Logger.getLogger(ThriftServer.class.getName()).log(Level.SEVERE, null, ex); } }
From source file:org.apache.dubbo.rpc.protocol.nativethrift.ThriftProtocol.java
License:Apache License
private <T> Runnable exportThreadedSelectorServer(T impl, Class<T> type, URL url) throws RpcException { TThreadedSelectorServer.Args tArgs = null; String typeName = type.getName(); TServer tserver = null;//www. j av a 2 s . c o m if (typeName.endsWith(THRIFT_IFACE)) { String processorClsName = typeName.substring(0, typeName.indexOf(THRIFT_IFACE)) + THRIFT_PROCESSOR; try { Class<?> clazz = Class.forName(processorClsName); Constructor constructor = clazz.getConstructor(type); try { TProcessor tprocessor = (TProcessor) constructor.newInstance(impl); processor.registerProcessor(typeName, tprocessor); tserver = serverMap.get(url.getAddress()); if (tserver == null) { /**Solve the problem of only 50 of the default number of concurrent connections*/ TNonblockingServerSocket.NonblockingAbstractServerSocketArgs args = new TNonblockingServerSocket.NonblockingAbstractServerSocketArgs(); /**1000 connections*/ args.backlog(1000); args.bindAddr(new InetSocketAddress(url.getHost(), url.getPort())); /**timeout: 10s */ args.clientTimeout(10000); TNonblockingServerSocket transport = new TNonblockingServerSocket(args); tArgs = new TThreadedSelectorServer.Args(transport); tArgs.workerThreads(200); tArgs.selectorThreads(4); tArgs.acceptQueueSizePerThread(256); tArgs.processor(processor); tArgs.transportFactory(new TFramedTransport.Factory()); tArgs.protocolFactory(new TCompactProtocol.Factory()); } else { return null; // if server is starting, return and do nothing here } } catch (Exception e) { logger.error(e.getMessage(), e); throw new RpcException("Fail to create nativethrift server(" + url + ") : " + e.getMessage(), e); } } catch (Exception e) { logger.error(e.getMessage(), e); throw new RpcException("Fail to create nativethrift server(" + url + ") : " + e.getMessage(), e); } } if (tserver == null && tArgs == null) { logger.error("Fail to create nativethrift server(" + url + ") due to null args"); throw new RpcException("Fail to create nativethrift server(" + url + ") due to null 
args"); } final TServer thriftServer = new TThreadedSelectorServer(tArgs); serverMap.put(url.getAddress(), thriftServer); new Thread(() -> { logger.info("Start Thrift ThreadedSelectorServer"); thriftServer.serve(); logger.info("Thrift ThreadedSelectorServer started."); }).start(); return () -> { try { logger.info("Close Thrift NonblockingServer"); thriftServer.stop(); } catch (Throwable e) { logger.warn(e.getMessage(), e); } }; }
From source file:org.diqube.server.ServerImplementation.java
License:Open Source License
/**
 * Builds the {@link TThreadedSelectorServer.Args} for this server's Thrift endpoint.
 *
 * <p>All services are multiplexed over one port via a {@link TMultiplexedProcessor}.
 * Externally reachable services skip message-integrity checking; server-to-server
 * services are wrapped in the integrity-checking protocol.
 *
 * @return the fully configured server args, or {@code null} if the listen socket
 *         could not be bound.
 */
private TThreadedSelectorServer.Args createServerArgs() {
    TMultiplexedProcessor multiProcessor = new TMultiplexedProcessor();
    // not-integrity-checked services: communication from "outside" of diqube-servers
    multiProcessor.registerProcessor(QueryServiceConstants.SERVICE_NAME,
            new IntegrityCheckingProtocol.IntegrityCheckDisablingProcessor(
                    new QueryService.Processor<QueryService.Iface>(queryHandler)));
    multiProcessor.registerProcessor(KeepAliveServiceConstants.SERVICE_NAME,
            new IntegrityCheckingProtocol.IntegrityCheckDisablingProcessor(
                    new KeepAliveService.Processor<KeepAliveService.Iface>(keepAliveHandler)));
    multiProcessor.registerProcessor(FlattenPreparationServiceConstants.SERVICE_NAME,
            new IntegrityCheckingProtocol.IntegrityCheckDisablingProcessor(
                    new FlattenPreparationService.Processor<FlattenPreparationService.Iface>(
                            flattenPreparationHandler)));
    multiProcessor.registerProcessor(ClusterInformationServiceConstants.SERVICE_NAME,
            new IntegrityCheckingProtocol.IntegrityCheckDisablingProcessor(
                    new ClusterInformationService.Processor<ClusterInformationService.Iface>(
                            clusterInformationHandler)));
    multiProcessor.registerProcessor(IdentityServiceConstants.SERVICE_NAME,
            new IntegrityCheckingProtocol.IntegrityCheckDisablingProcessor(
                    new IdentityService.Processor<IdentityService.Iface>(identityHandler)));
    multiProcessor.registerProcessor(IdentityCallbackServiceConstants.SERVICE_NAME,
            new IntegrityCheckingProtocol.IntegrityCheckDisablingProcessor(
                    new IdentityCallbackService.Processor<IdentityCallbackService.Iface>(
                            identityCallbackHandler)));
    multiProcessor.registerProcessor(TableMetadataServiceConstants.SERVICE_NAME,
            new IntegrityCheckingProtocol.IntegrityCheckDisablingProcessor(
                    new TableMetadataService.Processor<TableMetadataService.Iface>(tableMetadataHandler)));
    // integrity-checked services: Communication between diqube-servers
    multiProcessor.registerProcessor(ClusterQueryServiceConstants.SERVICE_NAME,
            new ClusterQueryService.Processor<ClusterQueryService.Iface>(clusterQueryHandler));
    multiProcessor.registerProcessor(ClusterManagementServiceConstants.SERVICE_NAME,
            new ClusterManagementService.Processor<ClusterManagementService.Iface>(clusterManagementHandler));
    multiProcessor.registerProcessor(ClusterFlattenServiceConstants.SERVICE_NAME,
            new ClusterFlattenService.Processor<ClusterFlattenService.Iface>(clusterFlattenHandler));
    multiProcessor.registerProcessor(ClusterConsensusServiceConstants.SERVICE_NAME,
            new ClusterConsensusService.Processor<ClusterConsensusService.Iface>(clusterConsensusHandler));
    // Bind to all interfaces when no bind address is configured.
    TNonblockingServerTransport transport;
    try {
        if ("".equals(bind))
            transport = new TNonblockingServerSocket(port);
        else
            transport = new TNonblockingServerSocket(new InetSocketAddress(bind, port));
    } catch (TTransportException e) {
        logger.error("Could not bind to port {}", port, e);
        return null;
    }
    // TThreadedSelectorServer:
    // 1 Accept Thread
    // selectorThreads number of selector threads: Read and write for accepted connections
    // uses ExecutorService to actually invoke any methods.
    TThreadedSelectorServer.Args serverArgs = new TThreadedSelectorServer.Args(transport);
    serverArgs.processor(multiProcessor);
    serverArgs.transportFactory(new RememberingTransport.Factory(new TFramedTransport.Factory()));
    serverArgs.protocolFactory(new IntegrityCheckingProtocol.Factory(new TCompactProtocol.Factory(),
            integritySecretHelper.provideMessageIntegritySecrets()));
    logger.info("Thrift server will use {} selector threads.", selectorThreads);
    serverArgs.selectorThreads(selectorThreads);
    serverArgs.executorService(
            executorManager.newCachedThreadPool("server-worker-%d", new Thread.UncaughtExceptionHandler() {
                @Override
                public void uncaughtException(Thread t, Throwable e) {
                    logger.error("Uncaught exception in one of the server workers", e);
                    server.stop(); // stop everything and shut down.
                }
            }));
    return serverArgs;
}