List of usage examples for org.springframework.batch.core.JobParametersBuilder
public JobParametersBuilder()
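The builder assembles an immutable JobParameters instance through a fluent API, as the examples below show. First, a minimal standalone sketch; the parameter names and values here are hypothetical, not taken from any of the examples:

import java.util.Date;

import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.JobParametersBuilder;

public class JobParametersBuilderSketch {
    public static void main(String[] args) {
        // Each add* call registers a typed parameter and returns the builder,
        // so calls chain; toJobParameters() produces the immutable result.
        JobParameters params = new JobParametersBuilder()
                .addString("inputFile", "/tmp/input.csv") // hypothetical path
                .addLong("chunkSize", 100L)
                .addDate("runDate", new Date())
                .toJobParameters();
        System.out.println(params.getString("inputFile")); // /tmp/input.csv
    }
}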
From source file:com.acmemotors.batch.LoadJobConfigurationTests.java
@Test
public void integrationTest() throws Exception {
    GenericApplicationContext context = new AnnotationConfigApplicationContext(TestJobConfiguration.class);
    JobLauncher launcher = context.getBean(JobLauncher.class);
    Job job = context.getBean(Job.class);
    JobParameters jobParameters = new JobParametersBuilder()
            .addLong("delay", 50L)
            .addString("inputFile", new ClassPathResource("/data/sampleJourney.json").getFile().getAbsolutePath())
            .toJobParameters();
    JobExecution execution = launcher.run(job, jobParameters);
    assertEquals(execution.getStatus(), BatchStatus.COMPLETED);
    verify(writer, times(3)).write(anyListOf(String.class));
}
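The delay and inputFile values passed above are typically consumed inside the job through step-scoped late binding. A minimal sketch, assuming a Java-config job definition; the JourneyReaderConfig class and journeyReader bean are hypothetical names, not part of the test above:

import org.springframework.batch.core.configuration.annotation.StepScope;
import org.springframework.batch.item.file.FlatFileItemReader;
import org.springframework.batch.item.file.mapping.PassThroughLineMapper;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.FileSystemResource;

@Configuration
public class JourneyReaderConfig {

    // @StepScope defers creation of this bean until the step executes,
    // so the SpEL expression can late-bind the launch-time parameter value.
    @Bean
    @StepScope
    public FlatFileItemReader<String> journeyReader(
            @Value("#{jobParameters['inputFile']}") String inputFile) {
        FlatFileItemReader<String> reader = new FlatFileItemReader<>();
        reader.setResource(new FileSystemResource(inputFile));
        reader.setLineMapper(new PassThroughLineMapper());
        return reader;
    }
}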
From source file:uk.ac.ebi.intact.editor.controller.dbmanager.DbImportController.java
public void launchComplexFileImport(ActionEvent evt) {
    if (this.uploadedFile != null && this.uploadedFile.getFileName() != null) {
        File[] files = saveUploadedFileTemporarily();
        if (files != null) {
            try {
                JobParametersBuilder builder = new JobParametersBuilder();
                this.jobId = "complexImport_" + System.currentTimeMillis();
                String eMail = "intact-dev@ebi.ac.uk";
                User user = userSessionController.getCurrentUser();
                String userLogin = null;
                if (user != null && user.getEmail() != null) {
                    eMail = user.getEmail();
                    userLogin = user.getLogin();
                }
                getIntactJobLauncher().run((Job) ApplicationContextProvider.getBean("complexImport"),
                        builder.addString("MIJobId", jobId)
                                .addString("input.file", files[0].getAbsolutePath())
                                .addString("error.file", files[1].getAbsolutePath())
                                .addString("email.recipient", eMail)
                                .addString("user.login", userLogin)
                                .toJobParameters());
                addInfoMessage("Job started", "Job ID: " + jobId);
            } catch (JobParametersInvalidException e) {
                addErrorMessage("Invalid job parameters", "Job Param: input.file=" + files[0].getAbsolutePath()
                        + ", error.file=" + files[1].getAbsolutePath());
                e.printStackTrace();
                jobId = null;
            } catch (JobExecutionAlreadyRunningException e) {
                addErrorMessage("Job already running", "Job Param: input.file=" + files[0].getAbsolutePath()
                        + ", error.file=" + files[1].getAbsolutePath());
                e.printStackTrace();
                jobId = null;
            } catch (JobRestartException e) {
                addErrorMessage("Job cannot be restarted", "Job Param: input.file=" + files[0].getAbsolutePath()
                        + ", error.file=" + files[1].getAbsolutePath());
                e.printStackTrace();
                jobId = null;
            } catch (JobInstanceAlreadyCompleteException e) {
                addErrorMessage("Job already finished", "Job Param: input.file=" + files[0].getAbsolutePath()
                        + ", error.file=" + files[1].getAbsolutePath());
                e.printStackTrace();
                jobId = null;
            }
        } else {
            addErrorMessage("Could not upload file " + uploadedFile.getFileName(), "Import failed");
            jobId = null;
        }
    } else {
        addErrorMessage("Could not upload file", "Import failed");
        jobId = null;
    }
}
From source file:com.inkubator.hrm.web.workingtime.WtPeriodEmpDetailController.java
public void doCalculateAttendanceRealization() {
    /* Guard against multiple clicks executing the batch job more than once;
       see the onComplete method, which resets jobExecution to null. */
    if (jobExecution == null) {
        try {
            long sleepVariable = tempAttendanceRealizationService
                    .getTotalListTempAttendanceRealizationViewModelByWtPeriodId(searchParameter,
                            model.getWtPeriodId().longValue()) * 3;
            JobParameters jobParameters = new JobParametersBuilder()
                    .addDate("periodUntillDate", model.getUntilPeriode())
                    .addString("createdBy", UserInfoUtil.getUserName())
                    .addDate("createdOn", new Date())
                    .addLong("wtPeriodId", model.getWtPeriodId().longValue())
                    .toJobParameters();
            jobExecution = jobLauncherAsync.run(jobTempAttendanceRealizationCalculation, jobParameters);
            int i = 0;
            while (true) {
                if (jobExecution.getStatus() == BatchStatus.STARTED
                        || jobExecution.getStatus() == BatchStatus.STARTING) {
                    if (i <= 85) {
                        setProgress(i++);
                    }
                    try {
                        Thread.sleep(sleepVariable);
                    } catch (InterruptedException e) {
                        // ignore and keep polling
                    }
                } else {
                    setProgress(100);
                    break;
                }
            }
        } catch (BussinessException ex) {
            jobExecution.setExitStatus(ExitStatus.FAILED);
            jobExecution.setStatus(BatchStatus.FAILED);
            jobExecution.addFailureException(ex);
            MessagesResourceUtil.setMessages(FacesMessage.SEVERITY_ERROR, "global.error",
                    ex.getErrorKeyMessage(),
                    FacesUtil.getSessionAttribute(HRMConstant.BAHASA_ACTIVE).toString());
        } catch (Exception ex) {
            LOGGER.error("Error ", ex);
        }
    }
}
From source file:com.inkubator.hrm.web.payroll.PaySalaryExecuteController.java
public void doCalculatePayroll() {
    if (payrollCalculationDate == null) {
        MessagesResourceUtil.setMessagesFlas(FacesMessage.SEVERITY_ERROR, "global.error",
                "salaryCalculation.payroll_date_should_be_filled",
                FacesUtil.getSessionAttribute(HRMConstant.BAHASA_ACTIVE).toString());
        FacesContext.getCurrentInstance().validationFailed();
    }
    /* Guard against multiple clicks executing the batch job more than once;
       see the onComplete method, which resets jobExecution to null. */
    if (jobExecution == null && payrollCalculationDate != null) {
        try {
            long sleepVariable = empDataService.getTotalEmpDataNotTerminate() * 3;
            JobParameters jobParameters = new JobParametersBuilder()
                    .addDate("payrollCalculationDate", payrollCalculationDate)
                    .addDate("startPeriodDate", wtPeriodePayroll.getFromPeriode())
                    .addDate("endPeriodDate", wtPeriodePayroll.getUntilPeriode())
                    .addString("createdBy", UserInfoUtil.getUserName())
                    .addDate("createdOn", new Date())
                    .toJobParameters();
            jobExecution = jobLauncherAsync.run(jobPayEmployeeCalculation, jobParameters);
            int i = 0;
            while (true) {
                if (jobExecution.getStatus() == BatchStatus.STARTED
                        || jobExecution.getStatus() == BatchStatus.STARTING) {
                    if (i <= 85) {
                        setProgress(i++);
                    }
                    try {
                        Thread.sleep(sleepVariable);
                    } catch (InterruptedException e) {
                        // ignore and keep polling
                    }
                } else {
                    setProgress(100);
                    break;
                }
            }
        } catch (Exception ex) {
            LOGGER.error("Error ", ex);
        }
    }
}
From source file:bamons.process.monitoring.service.BatchMonitoringServiceImpl.java
/**
 * Restores raw data from a JSON file.
 *
 * @param filePath the path of the file to restore
 * @throws Exception if the job launch fails
 */
@Async
@Override
public void restoreRawData(String filePath) throws Exception {
    JobParameters jobParameters = new JobParametersBuilder()
            .addLong("time", System.currentTimeMillis())
            .addString("filePath", filePath)
            .toJobParameters();
    JobExecution execution = jobLauncher.run(restoreFileJob, jobParameters);
    execution.getJobId();
}
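A note on the addLong("time", System.currentTimeMillis()) call above: identifying job parameters define the JobInstance, so including a fresh timestamp makes every launch a distinct instance and avoids JobInstanceAlreadyCompleteException when the same filePath is restored more than once. The addDate("startTime", new Date()) and addDate("StartTime", new Date()) calls in later examples serve the same purpose.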
From source file:gemlite.shell.service.batch.ImportService.java
public void executeJob(String name, String tpl) {
    checkContextInitialized();
    JobItem item = jobItems.get(name + tpl);
    LogUtil.getAppLog().info("Job:" + name + " executing...");
    try {
        JobExecution exec = jobLauncher.run(item.job, new JobParametersBuilder()
                .addDate("StartTime", new Date())
                .addString("name", name)
                .toJobParameters());
        String s1 = DateUtil.format(exec.getCreateTime(), "HH:mm:ss.SSS");
        LogUtil.getAppLog().info("Job:" + name + " started asynchronously, start time: " + s1);
    } catch (Exception e) {
        LogUtil.getCoreLog().info("Job:" + name + " failed");
        if (LogUtil.getCoreLog().isErrorEnabled())
            LogUtil.getCoreLog().error(name, e);
    }
}
From source file:com.complexible.stardog.ext.spring.batch.TestSpringBatch.java
/**
 * This test provides a functional execution of a full batch run. Twenty records are added to
 * the embedded Stardog database in the setup method of this test case.
 *
 * The batch hooks (TestBatchCallback and TestRowMapper) extract the data, marshal it to the
 * TestRecord bean, and write it back under a different predicate.
 *
 * @throws JobExecutionAlreadyRunningException
 * @throws JobRestartException
 * @throws JobInstanceAlreadyCompleteException
 * @throws JobParametersInvalidException
 */
@Test
public void integrationTest() throws JobExecutionAlreadyRunningException, JobRestartException,
        JobInstanceAlreadyCompleteException, JobParametersInvalidException {
    // Run the batch job
    JobParameters jobParameters = new JobParametersBuilder().addDate("startTime", new Date()).toJobParameters();
    JobExecution jobEx = jobLauncher.run(simpleJob, jobParameters);

    // Validate that 20 new records were created with the new predicate;
    // this uses the basic functionality in SnarlTemplate.
    List<String> results = snarlTemplate.doWithGetter(null, "urn:test:propertyUpdate",
            new GetterCallback<String>() {
                @Override
                public String processStatement(Statement statement) {
                    return statement.getObject().stringValue();
                }
            });
    assertEquals(results.size(), 20);
}
From source file:com.vmware.bdd.manager.ClusterManager.java
@ClusterManagerPointcut
public Long fixDiskFailures(String clusterName, String groupName) throws Exception {
    opsBlocker.blockUnsupportedOpsByCluster("fixDisk", clusterName);
    ClusterEntity cluster = clusterEntityMgr.findByName(clusterName);
    if (cluster == null) {
        logger.error("cluster " + clusterName + " does not exist");
        throw BddException.NOT_FOUND("Cluster", clusterName);
    }
    SoftwareManager softMgr = softwareManagerCollector.getSoftwareManager(cluster.getAppManager());
    ValidationUtils.validateVersion(clusterEntityMgr, clusterName);
    ClusterStatus oldStatus = cluster.getStatus();
    if (!oldStatus.isActiveServiceStatus()) {
        throw ClusterHealServiceException.NOT_SUPPORTED(clusterName, "The cluster status must be RUNNING");
    }
    List<NodeGroupEntity> nodeGroups;
    if (groupName != null) {
        NodeGroupEntity nodeGroup = clusterEntityMgr.findByName(clusterName, groupName);
        if (nodeGroup == null) {
            logger.error("node group " + groupName + " does not exist");
            throw BddException.NOT_FOUND("group", groupName);
        }
        nodeGroups = new ArrayList<NodeGroupEntity>(1);
        nodeGroups.add(nodeGroup);
    } else {
        nodeGroups = clusterEntityMgr.findAllGroups(clusterName);
    }
    // only fix worker nodes that have datanode or tasktracker roles
    boolean workerNodesFound = false;
    JobParametersBuilder parametersBuilder = new JobParametersBuilder();
    List<JobParameters> jobParameterList = new ArrayList<JobParameters>();
    for (NodeGroupEntity nodeGroup : nodeGroups) {
        List<String> roles = nodeGroup.getRoleNameList();
        workerNodesFound = true;
        for (NodeEntity node : clusterEntityMgr.findAllNodes(clusterName, nodeGroup.getName())) {
            if (node.isObsoleteNode()) {
                logger.info("Ignore node " + node.getVmName() + ", because it violates the VM name convention"
                        + " or exceeds the defined group instance number.");
                continue;
            }
            if (clusterHealService.hasBadDisks(node.getVmName())) {
                logger.warn("node " + node.getVmName() + " has bad disks. Fixing it..");
                boolean vmPowerOn = (node.getStatus().ordinal() != NodeStatus.POWERED_OFF.ordinal());
                JobParameters nodeParameters = parametersBuilder
                        .addString(JobConstants.CLUSTER_NAME_JOB_PARAM, clusterName)
                        .addString(JobConstants.TARGET_NAME_JOB_PARAM, node.getVmName())
                        .addString(JobConstants.GROUP_NAME_JOB_PARAM, nodeGroup.getName())
                        .addString(JobConstants.SUB_JOB_NODE_NAME, node.getVmName())
                        .addString(JobConstants.IS_VM_POWER_ON, String.valueOf(vmPowerOn))
                        .toJobParameters();
                jobParameterList.add(nodeParameters);
            }
        }
    }
    if (!workerNodesFound) {
        throw ClusterHealServiceException.NOT_SUPPORTED(clusterName,
                "only support fixing disk failures for worker/non-management nodes");
    }
    // all target nodes are healthy, simply return
    if (jobParameterList.isEmpty()) {
        logger.info("all target nodes are healthy, simply return");
        throw ClusterHealServiceException.NOT_NEEDED(clusterName);
    }
    try {
        clusterEntityMgr.updateClusterStatus(clusterName, ClusterStatus.MAINTENANCE);
        clusterEntityMgr.cleanupActionError(clusterName);
        return jobManager.runSubJobForNodes(JobConstants.FIX_NODE_DISK_FAILURE_JOB_NAME, jobParameterList,
                clusterName, oldStatus, oldStatus);
    } catch (Exception e) {
        logger.error("failed to fix disk failures, " + e.getMessage());
        throw e;
    }
}
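Note that a single JobParametersBuilder is reused across loop iterations here. Each addString call overwrites the value stored under the same key, and each toJobParameters() call hands the current parameter map to a new JobParameters instance, so every node ends up with its own complete parameter set. A minimal sketch of that snapshot behavior; the "target" key and node values are hypothetical, and this assumes JobParameters copies the map it is given, which recent Spring Batch versions do:

import org.springframework.batch.core.JobParameters;
import org.springframework.batch.core.JobParametersBuilder;

public class BuilderReuseSketch {
    public static void main(String[] args) {
        JobParametersBuilder builder = new JobParametersBuilder();
        JobParameters first = builder.addString("target", "node-0").toJobParameters();
        JobParameters second = builder.addString("target", "node-1").toJobParameters();
        // "target" was overwritten in the builder, but the first snapshot keeps its value:
        System.out.println(first.getString("target"));  // node-0
        System.out.println(second.getString("target")); // node-1
    }
}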