List of usage examples for the org.springframework.context.annotation.AnnotationConfigApplicationContext constructor
public AnnotationConfigApplicationContext(String... basePackages)
public AnnotationConfigApplicationContext(Class<?>... componentClasses)

The examples below use both forms: some register a @Configuration class directly, others scan a base package for annotated components.
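As a quick orientation before the examples, here is a minimal, self-contained sketch of both bootstrap styles. The AppConfig class and the com.example.app package are hypothetical placeholders, not taken from the examples below.

import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.ComponentScan;
import org.springframework.context.annotation.Configuration;

// Hypothetical configuration class, for illustration only
@Configuration
@ComponentScan("com.example.app")
class AppConfig {
}

public class Bootstrap {
    public static void main(String[] args) {
        // Variant 1: register one or more @Configuration classes directly
        try (AnnotationConfigApplicationContext ctx =
                new AnnotationConfigApplicationContext(AppConfig.class)) {
            // ctx.getBean(SomeService.class) ...
        }

        // Variant 2: scan one or more base packages for annotated components
        try (AnnotationConfigApplicationContext ctx =
                new AnnotationConfigApplicationContext("com.example.app")) {
            // beans discovered via component scanning are available here
        }
    }
}

Both variants return a fully refreshed context; try-with-resources (as several examples below use) ensures the context is closed when the block exits.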
From source file:com.khartec.waltz.jobs.sample.FlowGenerator.java
public static void main(String[] args) {
    AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(DIConfiguration.class);

    AuthoritativeSourceDao authSourceDao = ctx.getBean(AuthoritativeSourceDao.class);
    ApplicationService applicationDao = ctx.getBean(ApplicationService.class);
    DataTypeService dataTypesDao = ctx.getBean(DataTypeService.class);
    DataFlowService dataFlowDao = ctx.getBean(DataFlowService.class);
    OrganisationalUnitService orgUnitDao = ctx.getBean(OrganisationalUnitService.class);
    DataSource dataSource = ctx.getBean(DataSource.class);
    DSLContext dsl = ctx.getBean(DSLContext.class);

    List<AuthoritativeSource> authSources = authSourceDao.findByEntityKind(EntityKind.ORG_UNIT);
    List<String> dataTypes = dataTypesDao.getAll().stream().map(dt -> dt.code()).collect(toList());
    List<Application> apps = applicationDao.findAll();
    List<OrganisationalUnit> orgUnits = orgUnitDao.findAll();

    // Flows from each authoritative source into apps within its own org unit
    Set<DataFlow> expectedFlows = authSources.stream().flatMap(a -> {
        long orgUnitId = a.parentReference().id();
        return IntStream.range(0, rnd.nextInt(40))
                .mapToObj(i -> ImmutableDataFlow.builder()
                        .dataType(a.dataType())
                        .source(a.applicationReference())
                        .target(randomAppPick(apps, orgUnitId))
                        .build());
    }).collect(Collectors.toSet());

    // Flows from each authoritative source into apps in a randomly picked org unit
    Set<DataFlow> probableFlows = authSources.stream()
            .flatMap(a -> IntStream.range(0, rnd.nextInt(30))
                    .mapToObj(i -> ImmutableDataFlow.builder()
                            .dataType(a.dataType())
                            .source(a.applicationReference())
                            .target(randomAppPick(apps, randomPick(orgUnits).id().get()))
                            .build()))
            .collect(Collectors.toSet());

    // Entirely random app-to-app flows with random data types
    Set<DataFlow> randomFlows = apps.stream()
            .map(a -> ImmutableDataFlow.builder().source(a.toEntityReference()))
            .map(b -> b.target(randomAppPick(apps, randomPick(orgUnits).id().get())))
            .map(b -> b.dataType(randomPick(dataTypes)).build())
            .collect(Collectors.toSet());

    dsl.deleteFrom(DATA_FLOW).execute();

    Set<DataFlow> all = new HashSet<>();
    all.addAll(randomFlows);
    all.addAll(expectedFlows);
    all.addAll(probableFlows);

    dataFlowDao.addFlows(new ArrayList<>(all));

    System.out.println("Done");
}
From source file:de.bstreit.java.oscr.initialdata.LoadInitialDataApp.java
public static void main(String[] args) throws BeansException, SQLException {
    System.out.println("Loading...");
    try (final AbstractApplicationContext context = new AnnotationConfigApplicationContext(
            SpringConfigurationDoesComponentScan.class)) {
        context.getBean(LoadInitialDataApp.class).start();
    } catch (RuntimeException e) {
        // A deliberate "Aborted" exception is swallowed; anything else is rethrown
        if (!"Aborted".equals(e.getMessage())) {
            throw e;
        }
    }
}
From source file:internal.diff.common.launcher.SampleApplication.java
public static void main(String[] args) throws IOException {
    ApplicationContext context = new AnnotationConfigApplicationContext(SampleApplication.class);

    AmazonS3DirectoryMetadataService s3DirectoryMetadataService =
            context.getBean(AmazonS3DirectoryMetadataService.class);
    FileSystemDirectoryMetadataService fileSystemDirectoryMetadataService =
            context.getBean(FileSystemDirectoryMetadataService.class);
    DirectoryMetadataDifferenceService differenceService =
            context.getBean(DirectoryMetadataDifferenceService.class);

    log.info("The metadata of S3 bucket '{}' with prefix '{}' is going to be read.", S3_BUCKET, S3_PREFIX);
    DirectoryMetadata directory2 = s3DirectoryMetadataService.getMetadata(S3_BUCKET, S3_PREFIX);

    log.info("The metadata of file system directory '{}' is going to be read.", FILE_SYSTEM_DIRECTORY);
    DirectoryMetadata directory1 = fileSystemDirectoryMetadataService.getMetadata(FILE_SYSTEM_DIRECTORY);

    log.info("The differences between the metadata is being calculated.");
    DirectoryMetadataDifference difference = differenceService.getDifference(directory1, directory2);

    new ObjectMapper().writerWithDefaultPrettyPrinter().writeValue(System.out, difference);
}
From source file:org.marat.workflow.demo.app.App.java
public static void main(final String[] args) {
    try (final AnnotationConfigApplicationContext applicationContext =
            new AnnotationConfigApplicationContext(DemoConfiguration.class)) {
        applicationContext.getBean(App.class).run();
    }
}
From source file:com.khartec.waltz.jobs.InvolvementHarness2.java
public static void main(String[] args) throws InterruptedException {
    AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(DIConfiguration.class);

    DSLContext dsl = ctx.getBean(DSLContext.class);
    DataSource dataSource = ctx.getBean(DataSource.class);
    InvolvementDao dao = ctx.getBean(InvolvementDao.class);

    time("hmm", () -> foo(dsl, "NravvV2Is"));
    // time("findAllApps", () -> dao.findAllApplicationsByEmployeeId("NravvV2Is"));
}
From source file:com.khartec.waltz.jobs.sample.BusinessRegionProductHierarchyGenerator.java
public static void main(String[] args) throws IOException {
    AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(DIConfiguration.class);
    DSLContext dsl = ctx.getBean(DSLContext.class);
    generateHierarchy(dsl);
}
From source file:com.thinkbiganalytics.spark.cleanup.Cleanup.java
public static void main(String[] args) { log.info("Running Cleanup with these command line args: " + StringUtils.join(args, ",")); if (args.length < 2) { System.out.println("Expected command line args: <hive-schema-name> <hive-table-name>"); System.exit(1);//w ww . ja va 2 s .com } try { ApplicationContext ctx = new AnnotationConfigApplicationContext("com.thinkbiganalytics.spark"); Cleanup app = ctx.getBean(Cleanup.class); app.setArguments(args[0], args[1]); app.doCleanup(); } catch (Exception e) { log.error("Failed to perform cleanup: {}", e.getMessage()); System.exit(1); } log.info("Cleanup has finished."); }
From source file:com.thinkbiganalytics.spark.dataquality.checker.DataQualityChecker.java
public static void main(String[] args) { log.info("Running DataQualityChecker with these command line args: " + StringUtils.join(args, ",")); if (args.length < 2) { System.out.println("Expected command line args: <hive-schema-name> <hive-table-name>"); System.exit(1);/* w w w. j a va2s . c om*/ } try { ApplicationContext ctx = new AnnotationConfigApplicationContext("com.thinkbiganalytics.spark"); DataQualityChecker app = ctx.getBean(DataQualityChecker.class); app.setArguments(args[0], args[1]); app.doDataQualityChecks(); } catch (Exception e) { log.error("Failed to perform data quality checks: {}", e.getMessage()); System.exit(1); } log.info("DataQualityChecker has finished."); }
From source file:com.thinkbiganalytics.spark.dataprofiler.core.Profiler.java
/**
 * Main entry point into program
 *
 * @param args list of args
 */
public static void main(String[] args) {
    final ApplicationContext ctx = new AnnotationConfigApplicationContext("com.thinkbiganalytics.spark");
    final Profiler profiler = new Profiler(
            ctx.getBean(FieldPolicyLoader.class),
            ctx.getBean(com.thinkbiganalytics.spark.dataprofiler.Profiler.class),
            ctx.getBean(ProfilerConfiguration.class),
            ctx.getBean(SparkContextService.class),
            ctx.getBean(SQLContext.class));
    profiler.run(args);
}
From source file:com.khartec.waltz.jobs.DataFlowHarness.java
public static void main(String[] args) throws ParseException {
    AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext(DIConfiguration.class);
    DataFlowService service = ctx.getBean(DataFlowService.class);
    DataFlowStatsDao dataFlowStatsDao = ctx.getBean(DataFlowStatsDao.class);
    DSLContext dsl = ctx.getBean(DSLContext.class);

    IdSelectionOptions options = ImmutableIdSelectionOptions.builder()
            .entityReference(ImmutableEntityReference.builder().kind(EntityKind.PERSON).id(74747).build())
            .scope(HierarchyQueryScope.CHILDREN)
            .build();

    SelectJoinStep<Record1<Integer>> invCount = dsl
            .select(DSL.countDistinct(INVOLVEMENT.EMPLOYEE_ID).as("C"))
            .from(INVOLVEMENT);
    SelectJoinStep<Record1<Integer>> appCount = dsl
            .select(DSL.countDistinct(APPLICATION.ID).as("C"))
            .from(APPLICATION);
    SelectOrderByStep<Record1<Integer>> union = invCount.unionAll(appCount);
    union.stream().forEach(System.out::println);

    FunctionUtilities.time("appCounts",
            () -> dataFlowStatsDao.countDistinctFlowInvolvement(DSL.select(APPLICATION.ID).from(APPLICATION)));

    // DataFlowStatistics stats = FunctionUtilities.time("full service", () -> service.calculateStats(options));

    // 'selector' and 'empIdParam' are declared elsewhere in the original source file
    dsl.renderNamedParams(selector);
    empIdParam.setValue("huSs97bwj");

    FunctionUtilities.time("appCounts",
            () -> dataFlowStatsDao.countDistinctAppInvolvement(DSL.select(APPLICATION.ID).from(APPLICATION)));

    Select<Record1<Long>> subQ = HarnessUtilities.time("build person sub q",
            () -> mkForPersonReportees("huSs97bwj"));

    HarnessUtilities.time("build complex q",
            () -> bigQuery(dsl, mkForPersonReportees("huSs97bwj")));
}