List of usage examples for org.apache.hadoop.yarn.event.AsyncDispatcher — public constructor AsyncDispatcher()
From source file:org.apache.tajo.engine.plan.global.TestGlobalQueryPlanner.java
License:Apache License
@BeforeClass public static void setup() throws Exception { util = new TajoTestingCluster(); util.startCatalogCluster();//from w w w . j a v a 2 s .co m int i, j; schema = new Schema(); schema.addColumn("id", Type.INT4); schema.addColumn("age", Type.INT4); schema.addColumn("name", Type.TEXT); schema.addColumn("salary", Type.INT4); TableMeta meta; conf = new TajoConf(util.getConfiguration()); catalog = util.getMiniCatalogCluster().getCatalog(); for (FunctionDesc funcDesc : TajoMaster.initBuiltinFunctions()) { catalog.registerFunction(funcDesc); } sm = new StorageManager(util.getConfiguration()); FunctionDesc funcDesc = new FunctionDesc("sumtest", TestSum.class, FunctionType.GENERAL, CatalogUtil.newDataTypesWithoutLen(Type.INT4), CatalogUtil.newDataTypesWithoutLen(Type.INT4)); catalog.registerFunction(funcDesc); FileSystem fs = sm.getFileSystem(); AsyncDispatcher dispatcher = new AsyncDispatcher(); dispatcher.init(conf); dispatcher.start(); planner = new GlobalPlanner(conf, catalog, new StorageManager(conf), dispatcher.getEventHandler()); analyzer = new QueryAnalyzer(catalog); logicalPlanner = new LogicalPlanner(catalog); int tbNum = 2; int tupleNum; Appender appender; Tuple t = new VTuple(4); t.put(new Datum[] { DatumFactory.createInt4(1), DatumFactory.createInt4(32), DatumFactory.createText("h"), DatumFactory.createInt4(10) }); for (i = 0; i < tbNum; i++) { meta = CatalogUtil.newTableMeta((Schema) schema.clone(), StoreType.CSV); meta.putOption(CSVFile.DELIMITER, ","); Path dataRoot = sm.getBaseDir(); Path tablePath = StorageUtil.concatPath(dataRoot, "table" + i, "file.csv"); if (fs.exists(tablePath.getParent())) { fs.delete(tablePath.getParent(), true); } fs.mkdirs(tablePath.getParent()); appender = StorageManager.getAppender(conf, meta, tablePath); appender.init(); tupleNum = 100; for (j = 0; j < tupleNum; j++) { appender.addTuple(t); } appender.close(); TableDesc desc = CatalogUtil.newTableDesc("table" + i, (TableMeta) meta.clone(), tablePath); 
catalog.addTable(desc); } QueryIdFactory.reset(); queryId = QueryIdFactory.newQueryId(); dispatcher.stop(); }
From source file:org.apache.tajo.engine.planner.global.TestGlobalQueryOptimizer.java
License:Apache License
@BeforeClass
public static void setup() throws Exception {
    util = new TajoTestingCluster();
    util.startCatalogCluster();

    // Four-column test schema shared by every generated table.
    schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);
    schema.addColumn("salary", Type.INT4);

    conf = new TajoConf(util.getConfiguration());
    catalog = util.getMiniCatalogCluster().getCatalog();
    StorageManager storageManager = new StorageManager(util.getConfiguration());

    FunctionDesc sumTest = new FunctionDesc("sumtest", TestSum.class, FunctionType.GENERAL,
            CatalogUtil.newDataTypesWithoutLen(Type.INT4),
            CatalogUtil.newDataTypesWithoutLen(Type.INT4));
    catalog.registerFunction(sumTest);

    FileSystem fileSystem = storageManager.getFileSystem();

    // NOTE(review): this dispatcher is never init()ed or start()ed; only its
    // event handler is handed to the planner — confirm that is intentional.
    AsyncDispatcher dispatcher = new AsyncDispatcher();
    planner = new GlobalPlanner(conf, catalog, new StorageManager(conf), dispatcher.getEventHandler());
    analyzer = new QueryAnalyzer(catalog);
    logicalPlanner = new LogicalPlanner(catalog);

    // Write two identical CSV tables ("table0", "table1"), 100 rows each,
    // and register them with the catalog.
    Tuple row = new VTuple(4);
    row.put(new Datum[] { DatumFactory.createInt4(1), DatumFactory.createInt4(32),
            DatumFactory.createText("h"), DatumFactory.createInt4(10) });
    int tableCount = 2;
    for (int tableIdx = 0; tableIdx < tableCount; tableIdx++) {
        TableMeta meta = CatalogUtil.newTableMeta((Schema) schema.clone(), StoreType.CSV);
        meta.putOption(CSVFile.DELIMITER, ",");
        Path dataRoot = storageManager.getBaseDir();
        Path tablePath = StorageUtil.concatPath(dataRoot, "table" + tableIdx, "file.csv");
        if (fileSystem.exists(tablePath.getParent())) {
            fileSystem.delete(tablePath.getParent(), true);
        }
        fileSystem.mkdirs(tablePath.getParent());
        Appender appender = StorageManager.getAppender(conf, meta, tablePath);
        appender.init();
        int rowCount = 100;
        for (int r = 0; r < rowCount; r++) {
            appender.addTuple(row);
        }
        appender.close();
        TableDesc desc = CatalogUtil.newTableDesc("table" + tableIdx, (TableMeta) meta.clone(),
                storageManager.getTablePath("table" + tableIdx));
        catalog.addTable(desc);
    }
    QueryIdFactory.reset();
    queryId = QueryIdFactory.newQueryId();
    optimizer = new GlobalOptimizer();
}
From source file:org.apache.tajo.master.QueryManager.java
License:Apache License
@Override
public void serviceInit(Configuration conf) throws Exception {
    try {
        // Own dispatcher for query-job events, managed as a child service.
        this.dispatcher = new AsyncDispatcher();
        addService(this.dispatcher);
        this.dispatcher.register(QueryJobEvent.Type.class, new QueryJobManagerEventHandler());
        this.scheduler = new SimpleFifoScheduler(this);
    } catch (Exception e) {
        // Initialization failure is logged rather than propagated.
        LOG.error("Failed to init service " + getName() + " by exception " + e, e);
    }
    super.serviceInit(conf);
}
From source file:org.apache.tajo.master.querymaster.QueryJobManager.java
License:Apache License
@Override
public void init(Configuration conf) {
    try {
        // Own dispatcher for query-job events, managed as a child service.
        this.dispatcher = new AsyncDispatcher();
        addService(this.dispatcher);
        this.dispatcher.register(QueryJobEvent.Type.class, new QueryJobManagerEventHandler());
        this.scheduler = new SimpleFifoScheduler(this);
    } catch (Exception e) {
        // Delegate failure handling; no query context is available yet.
        catchException(null, e);
    }
    super.init(conf);
}
From source file:org.apache.tajo.master.QueryMaster.java
License:Apache License
public void init(Configuration _conf) { QueryConf conf = new QueryConf(_conf); try {//from w w w . j a v a 2 s .c om queryContext = new QueryContext(conf); dispatcher = masterContext.getDispatcher(); // TODO - This comment should be eliminated when QueryMaster is separated. dispatcher = new AsyncDispatcher(); addIfService(dispatcher); // TODO - This comment should be improved when QueryMaster is separated. rpc = masterContext.getYarnRPC(); catalog = masterContext.getCatalog(); storageManager = masterContext.getStorageManager(); taskRunnerListener = new TaskRunnerListener(queryContext); addIfService(taskRunnerListener); rmAllocator = new RMContainerAllocator(queryContext); addIfService(rmAllocator); dispatcher.register(ContainerAllocatorEventType.class, rmAllocator); query = new Query(queryContext, queryId, clock, appSubmitTime, "", dispatcher.getEventHandler(), masterPlan, storageManager); initStagingDir(); // QueryEventDispatcher is already registered in TajoMaster dispatcher.register(QueryEventType.class, query); dispatcher.register(SubQueryEventType.class, new SubQueryEventDispatcher()); dispatcher.register(TaskEventType.class, new TaskEventDispatcher()); dispatcher.register(TaskAttemptEventType.class, new TaskAttemptEventDispatcher()); dispatcher.register(QueryFinishEvent.EventType.class, new QueryFinishEventHandler()); dispatcher.register(TaskSchedulerEvent.EventType.class, new TaskSchedulerDispatcher()); taskRunnerLauncher = new TaskRunnerLauncherImpl(queryContext); addIfService(taskRunnerLauncher); dispatcher.register(TaskRunnerGroupEvent.EventType.class, taskRunnerLauncher); } catch (Throwable t) { LOG.error(ExceptionUtils.getStackTrace(t)); throw new RuntimeException(t); } super.init(conf); }
From source file:org.apache.tajo.master.rm.TajoResourceManager.java
License:Apache License
@Override public void serviceInit(Configuration conf) throws Exception { this.systemConf = TUtil.checkTypeAndGet(conf, TajoConf.class); AsyncDispatcher dispatcher = new AsyncDispatcher(); addIfService(dispatcher);/*from ww w.j a v a 2 s .c om*/ rmContext = new TajoRMContext(dispatcher); this.queryIdSeed = String.valueOf(System.currentTimeMillis()); this.nodeLivelinessMonitor = new NodeLivelinessMonitor(this.rmContext.getDispatcher()); addIfService(this.nodeLivelinessMonitor); // Register event handler for Workers rmContext.getDispatcher().register(NodeEventType.class, new WorkerEventDispatcher(rmContext)); resourceTracker = new TajoResourceTracker(this, nodeLivelinessMonitor); addIfService(resourceTracker); String schedulerClassName = systemConf.getVar(TajoConf.ConfVars.RESOURCE_SCHEDULER_CLASS); scheduler = loadScheduler(schedulerClassName); LOG.info("Loaded resource scheduler : " + scheduler.getClass()); addIfService(scheduler); rmContext.getDispatcher().register(SchedulerEventType.class, scheduler); super.serviceInit(systemConf); }
From source file:org.apache.tajo.master.rm.TajoWorkerResourceManager.java
License:Apache License
@Override public void serviceInit(Configuration conf) throws Exception { Preconditions.checkArgument(conf instanceof TajoConf); this.systemConf = (TajoConf) conf; AsyncDispatcher dispatcher = new AsyncDispatcher(); addIfService(dispatcher);/*from w ww. j a v a2s. c om*/ rmContext = new TajoRMContext(dispatcher); this.queryIdSeed = String.valueOf(System.currentTimeMillis()); requestQueue = new LinkedBlockingDeque<WorkerResourceRequest>(); workerResourceAllocator = new WorkerResourceAllocationThread(); workerResourceAllocator.start(); this.workerLivelinessMonitor = new WorkerLivelinessMonitor(this.rmContext.getDispatcher()); addIfService(this.workerLivelinessMonitor); // Register event handler for Workers rmContext.getDispatcher().register(WorkerEventType.class, new WorkerEventDispatcher(rmContext)); resourceTracker = new TajoResourceTracker(rmContext, workerLivelinessMonitor); addIfService(resourceTracker); super.serviceInit(systemConf); }
From source file:org.apache.tajo.master.scheduler.TestSimpleScheduler.java
License:Apache License
@Before
public void setup() {
    conf = new TajoConf();
    nodeResource = NodeResource.createResource(1500, 2, 3);

    // Composite service that wires up a dispatcher, an RM context, and the
    // scheduler under test, then seeds `workerNum` started worker nodes.
    service = new CompositeService(TestSimpleScheduler.class.getSimpleName()) {
        @Override
        protected void serviceInit(Configuration conf) throws Exception {
            dispatcher = new AsyncDispatcher();
            addService(dispatcher);
            rmContext = new TajoRMContext(dispatcher);
            rmContext.getDispatcher().register(NodeEventType.class,
                    new TajoResourceManager.WorkerEventDispatcher(rmContext));

            barrier = new Semaphore(0);
            scheduler = new MySimpleScheduler(rmContext, barrier);
            addService(scheduler);
            rmContext.getDispatcher().register(SchedulerEventType.class, scheduler);

            for (int workerIdx = 0; workerIdx < workerNum; workerIdx++) {
                WorkerConnectionInfo connInfo = new WorkerConnectionInfo("host" + workerIdx,
                        28091 + workerIdx, 28092, 21000, 28093, 28080);
                rmContext.getNodes().putIfAbsent(connInfo.getId(),
                        new NodeStatus(rmContext, NodeResources.clone(nodeResource), connInfo));
                rmContext.getDispatcher().getEventHandler()
                        .handle(new NodeEvent(connInfo.getId(), NodeEventType.STARTED));
            }
            super.serviceInit(conf);
        }
    };
    service.init(conf);
    service.start();

    assertEquals(workerNum, rmContext.getNodes().size());

    // Sum the capability of every registered node for later assertions.
    totalResource = NodeResources.createResource(0);
    for (NodeStatus node : rmContext.getNodes().values()) {
        NodeResources.addTo(totalResource, node.getTotalResourceCapability());
    }
}
From source file:org.apache.tajo.master.TajoMaster.java
License:Apache License
@Override public void serviceInit(Configuration _conf) throws Exception { if (!(_conf instanceof TajoConf)) { throw new IllegalArgumentException("_conf should be a TajoConf type."); }//from ww w .j a v a2 s. c o m this.systemConf = (TajoConf) _conf; Runtime.getRuntime().addShutdownHook(new Thread(new ShutdownHook())); context = new MasterContext(systemConf); clock = new SystemClock(); try { RackResolver.init(systemConf); initResourceManager(); initWebServer(); this.dispatcher = new AsyncDispatcher(); addIfService(dispatcher); // check the system directory and create if they are not created. checkAndInitializeSystemDirectories(); diagnoseTajoMaster(); this.storeManager = StorageManager.getFileStorageManager(systemConf); catalogServer = new CatalogServer(FunctionLoader.load()); addIfService(catalogServer); catalog = new LocalCatalogWrapper(catalogServer, systemConf); sessionManager = new SessionManager(dispatcher); addIfService(sessionManager); globalEngine = new GlobalEngine(context); addIfService(globalEngine); queryManager = new QueryManager(context); addIfService(queryManager); tajoMasterClientService = new TajoMasterClientService(context); addIfService(tajoMasterClientService); tajoMasterService = new QueryCoordinatorService(context); addIfService(tajoMasterService); } catch (Exception e) { LOG.error(e.getMessage(), e); throw e; } super.serviceInit(systemConf); LOG.info("Tajo Master is initialized."); }
From source file:org.apache.tajo.master.TestExecutionBlockCursor.java
License:Apache License
@BeforeClass public static void setUp() throws Exception { util = new TajoTestingCluster(); util.startCatalogCluster();//from w w w . jav a2s.c o m conf = util.getConfiguration(); conf.set(TajoConf.ConfVars.$TEST_BROADCAST_JOIN_ENABLED.varname, "false"); catalog = util.getMiniCatalogCluster().getCatalog(); catalog.createTablespace(DEFAULT_TABLESPACE_NAME, "hdfs://localhost:!234/warehouse"); catalog.createDatabase(DEFAULT_DATABASE_NAME, DEFAULT_TABLESPACE_NAME); TPCH tpch = new TPCH(); tpch.loadSchemas(); tpch.loadOutSchema(); for (String table : tpch.getTableNames()) { TableMeta m = CatalogUtil.newTableMeta(CatalogProtos.StoreType.CSV); TableDesc d = CatalogUtil.newTableDesc(CatalogUtil.buildFQName(DEFAULT_DATABASE_NAME, table), tpch.getSchema(table), m, CommonTestingUtil.getTestDir()); TableStats stats = new TableStats(); stats.setNumBytes(TPCH.tableVolumes.get(table)); d.setStats(stats); catalog.createTable(d); } analyzer = new SQLAnalyzer(); logicalPlanner = new LogicalPlanner(catalog); optimizer = new LogicalOptimizer(conf); StorageManager sm = StorageManager.getFileStorageManager(conf); dispatcher = new AsyncDispatcher(); dispatcher.init(conf); dispatcher.start(); planner = new GlobalPlanner(conf, catalog); }