Usage examples for org.springframework.batch.item.file.transform.DelimitedLineTokenizer#setDelimiter, collected from open-source projects.
public void setDelimiter(String delimiter)
From source file:my.sandbox.spring.batch.demo.readers.ProductReader.java
public ProductReader() { DelimitedLineTokenizer tokenizer = new DelimitedLineTokenizer(); tokenizer.setNames(new String[] { "description", "price", "purchaseDate" }); tokenizer.setDelimiter(DelimitedLineTokenizer.DELIMITER_TAB); BeanWrapperFieldSetMapper<Product> mapper = new BeanWrapperFieldSetMapper<>(); mapper.setTargetType(Product.class); DefaultLineMapper<Product> defaultLineMapper = new DefaultLineMapper<>(); defaultLineMapper.setLineTokenizer(tokenizer); defaultLineMapper.setFieldSetMapper(mapper); setLineMapper(defaultLineMapper);/*from w w w. j ava 2s .co m*/ }
From source file:org.my.spring.batch.java.config.demo.readers.ProductReader.java
/**
 * Configures this reader against the file named by the {@code inputFile}
 * job parameter.
 *
 * <p>Lines are tab-separated with the columns {@code description},
 * {@code price} and {@code purchaseDate}; each field set is bound onto a
 * {@link Product} bean by matching property names.
 *
 * @param inputFile path to the input file, injected from the job parameters
 */
@Autowired
public ProductReader(@Value("#{jobParameters[inputFile]}") String inputFile) {
    setResource(new FileSystemResource(inputFile));

    // Tokenize tab-delimited input lines into named fields.
    DelimitedLineTokenizer lineTokenizer = new DelimitedLineTokenizer();
    lineTokenizer.setDelimiter(DelimitedLineTokenizer.DELIMITER_TAB);
    lineTokenizer.setNames(new String[] { "description", "price", "purchaseDate" });

    // Map each tokenized FieldSet onto a Product via bean-property binding.
    BeanWrapperFieldSetMapper<Product> fieldSetMapper = new BeanWrapperFieldSetMapper<>();
    fieldSetMapper.setTargetType(Product.class);

    // Wire tokenizer + mapper into the line mapper used by this reader.
    DefaultLineMapper<Product> lineMapper = new DefaultLineMapper<>();
    lineMapper.setLineTokenizer(lineTokenizer);
    lineMapper.setFieldSetMapper(fieldSetMapper);
    setLineMapper(lineMapper);
}
From source file:com.springsource.html5expense.config.BatchConfig.java
/**
 * Step-scoped reader for a comma-separated expense file.
 *
 * <p>The {@code file} job parameter names a CSV file (without extension) under
 * {@code batchFileDirectory}. Each line is tokenized into the columns
 * {@code date}, {@code amount}, {@code category} and {@code merchant} and
 * passed through unmapped as a {@link FieldSet}.
 *
 * <p>Fix: the return type was the raw {@code FlatFileItemReader}; it is now
 * parameterized as {@code FlatFileItemReader<FieldSet>} to match the instance
 * actually built, eliminating the unchecked raw-type usage. This is
 * backward-compatible for callers.
 *
 * @param resource base name of the CSV file, injected from the job parameters
 * @return a configured flat-file reader producing one {@link FieldSet} per line
 */
@Bean
@Scope("step") // one reader instance per step execution, so jobParameters are resolvable
public FlatFileItemReader<FieldSet> reader(@Value("#{jobParameters[file]}") String resource) {
    File f = new File(this.batchFileDirectory, resource + ".csv");

    // Comma-delimited tokenizer with fixed column names.
    DelimitedLineTokenizer del = new DelimitedLineTokenizer();
    del.setNames("date,amount,category,merchant".split(","));
    del.setDelimiter(DelimitedLineTokenizer.DELIMITER_COMMA);

    // Pass the tokenized FieldSet through unchanged (no bean mapping).
    DefaultLineMapper<FieldSet> defaultLineMapper = new DefaultLineMapper<FieldSet>();
    defaultLineMapper.setLineTokenizer(del);
    defaultLineMapper.setFieldSetMapper(new PassThroughFieldSetMapper());
    defaultLineMapper.afterPropertiesSet();

    FlatFileItemReader<FieldSet> fileItemReader = new FlatFileItemReader<FieldSet>();
    fileItemReader.setLineMapper(defaultLineMapper);
    fileItemReader.setResource(new FileSystemResource(f));
    return fileItemReader;
}
From source file:org.jasig.ssp.util.importer.job.csv.RawItemCsvReader.java
private LineTokenizer getTokenizer(String line) { this.columnNames = line.split(delimiter); DelimitedLineTokenizer lineTokenizer = new DelimitedLineTokenizer(); lineTokenizer.setQuoteCharacter(quoteCharacter); lineTokenizer.setDelimiter(delimiter); lineTokenizer.setStrict(false);//from w w w .ja v a2 s .c o m lineTokenizer.setNames(columnNames); addColumnNames(); return lineTokenizer; }
From source file:org.cbioportal.annotation.pipeline.MutationRecordReader.java
@Override public void open(ExecutionContext ec) throws ItemStreamException { processComments(ec);/* w w w .ja va 2 s. com*/ FlatFileItemReader<MutationRecord> reader = new FlatFileItemReader<>(); reader.setResource(new FileSystemResource(filename)); DefaultLineMapper<MutationRecord> mapper = new DefaultLineMapper<>(); final DelimitedLineTokenizer tokenizer = new DelimitedLineTokenizer(); tokenizer.setDelimiter("\t"); mapper.setLineTokenizer(tokenizer); mapper.setFieldSetMapper(new MutationFieldSetMapper()); reader.setLineMapper(mapper); reader.setLinesToSkip(1); reader.setSkippedLinesCallback(new LineCallbackHandler() { @Override public void handleLine(String line) { tokenizer.setNames(line.split("\t")); } }); reader.open(ec); LOG.info("Loading records from: " + filename); MutationRecord mutationRecord; try { while ((mutationRecord = reader.read()) != null) { mutationRecords.add(mutationRecord); } } catch (Exception e) { throw new ItemStreamException(e); } reader.close(); int variantsToAnnotateCount = mutationRecords.size(); int annotatedVariantsCount = 0; LOG.info(String.valueOf(variantsToAnnotateCount) + " records to annotate"); for (MutationRecord record : mutationRecords) { annotatedVariantsCount++; if (annotatedVariantsCount % 2000 == 0) { LOG.info("\tOn record " + String.valueOf(annotatedVariantsCount) + " out of " + String.valueOf(variantsToAnnotateCount) + ", annotation " + String.valueOf((int) (((annotatedVariantsCount * 1.0) / variantsToAnnotateCount) * 100)) + "% complete"); } // save variant details for logging String variantDetails = "(sampleId,chr,start,end,ref,alt,url)= (" + record.getTUMOR_SAMPLE_BARCODE() + "," + record.getCHROMOSOME() + "," + record.getSTART_POSITION() + "," + record.getEND_POSITION() + "," + record.getREFERENCE_ALLELE() + "," + record.getTUMOR_SEQ_ALLELE2() + "," + annotator.getUrlForRecord(record, isoformOverride) + ")"; // init annotated record w/o genome nexus in case server error occurs // if no error then annotated record 
will get overwritten anyway with genome nexus response String serverErrorMessage = ""; AnnotatedRecord annotatedRecord = new AnnotatedRecord(record); try { annotatedRecord = annotator.annotateRecord(record, replace, isoformOverride, true); } catch (HttpServerErrorException ex) { serverErrorMessage = "Failed to annotate variant due to internal server error"; } catch (HttpClientErrorException ex) { serverErrorMessage = "Failed to annotate variant due to client error"; } catch (HttpMessageNotReadableException ex) { serverErrorMessage = "Failed to annotate variant due to message not readable error"; } catch (GenomeNexusAnnotationFailureException ex) { serverErrorMessage = "Failed to annotate variant due to Genome Nexus : " + ex.getMessage(); } annotatedRecords.add(annotatedRecord); header.addAll(annotatedRecord.getHeaderWithAdditionalFields()); // log server failure message if applicable if (!serverErrorMessage.isEmpty()) { LOG.warn(serverErrorMessage); failedAnnotations++; failedServerAnnotations++; if (errorReportLocation != null) updateErrorMessages(record, record.getVARIANT_CLASSIFICATION(), annotator.getUrlForRecord(record, isoformOverride), serverErrorMessage); continue; } String annotationErrorMessage = ""; if (MafUtil.variantContainsAmbiguousTumorSeqAllele(record.getREFERENCE_ALLELE(), record.getTUMOR_SEQ_ALLELE1(), record.getTUMOR_SEQ_ALLELE2())) { snpAndIndelVariants++; annotationErrorMessage = "Record contains ambiguous SNP and INDEL allele change - SNP allele will be used"; } if (annotatedRecord.getHGVSC().isEmpty() && annotatedRecord.getHGVSP().isEmpty()) { if (annotator.isHgvspNullClassifications(annotatedRecord.getVARIANT_CLASSIFICATION())) { failedNullHgvspAnnotations++; annotationErrorMessage = "Ignoring record with HGVSp null classification '" + annotatedRecord.getVARIANT_CLASSIFICATION() + "'"; } else { annotationErrorMessage = "Failed to annotate variant"; } failedAnnotations++; } if (!annotationErrorMessage.isEmpty()) { if (verbose) 
LOG.info(annotationErrorMessage + ": " + variantDetails); if (errorReportLocation != null) updateErrorMessages(record, annotatedRecord.getVARIANT_CLASSIFICATION(), annotator.getUrlForRecord(record, isoformOverride), annotationErrorMessage); } } // print summary statistics and save error messages to file if applicable printSummaryStatistics(failedAnnotations, failedNullHgvspAnnotations, snpAndIndelVariants, failedServerAnnotations); if (errorReportLocation != null) { saveErrorMessagesToFile(errorMessages); } ec.put("mutation_header", new ArrayList(header)); }