Example usage for com.fasterxml.jackson.dataformat.csv CsvMapper CsvMapper

List of usage examples for com.fasterxml.jackson.dataformat.csv CsvMapper CsvMapper

Introduction

On this page you can find example usage for com.fasterxml.jackson.dataformat.csv CsvMapper CsvMapper.

Prototype

public CsvMapper() 

Source Link

Usage

From source file:edu.cmu.cs.lti.discoursedb.annotation.lightside.io.LightSideService.java

/**
 * Exports data in a format that can be imported into LightSide and then annotated with a classifier that was training with
 * data generated by the exportAnnotations methods.
 * /* www.  j  ava  2s  .c  o m*/
 * @param outputFilePath path to the output file to which the extracted data should be written
 * @param contributions contributions that should be exported for annotation
 */
@Transactional(readOnly = true)
public void exportDataForAnnotation(String outputFilePath, Iterable<Contribution> contributions) {
    Assert.hasText(outputFilePath, "Path to the output file cannot be empty.");
    File outputFile = new File(outputFilePath);
    Assert.isTrue(!outputFile.isDirectory(),
            outputFilePath + " points to a directory but should point to a file.");

    StringBuilder output = new StringBuilder();
    CsvMapper mapper = new CsvMapper();
    try {
        if (!outputFile.exists()) {
            //only add header once
            output.append(mapper.writeValueAsString(new String[] { TEXT_COL, ID_COL }));
        }
        for (Contribution contrib : contributions) {
            output.append(mapper.writeValueAsString(
                    new String[] { contrib.getCurrentRevision().getText(), String.valueOf(contrib.getId()) }));
        }
        FileUtils.writeStringToFile(outputFile, output.toString(), true);
    } catch (IOException e) {
        log.error("Error writing exported data to csv");
    }
}

From source file:edu.cmu.cs.lti.discoursedb.annotation.lightside.io.LightSideService.java

/**
 * Imports a file that was previously generated by exportDataForAnnotation() and then annotated by LightSide classifiers
 * /*  w ww  .j av a  2s.  c  o m*/
 * @param inputFilePath path to the file that should be imported
 */
public void importAnnotatedData(String inputFilePath) {
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    File csvFile = new File(inputFilePath);
    try {
        MappingIterator<String[]> it = mapper.readerFor(String[].class).readValues(csvFile);

        //process header
        String[] header = it.next();
        Map<String, Integer> headerId = new HashMap<>();
        for (int i = 0; i < header.length; i++) {
            headerId.put(header[i], i);
        }

        //process data
        while (it.hasNext()) {
            String[] row = it.next();
            Contribution curContrib = null;
            List<AnnotationInstance> curAnnos = new ArrayList<>();
            for (int i = 0; i < row.length; i++) {
                String field = row[i];
                if (i == headerId.get(TEXT_COL)) {
                    //we don't need the text column
                } else if (i == headerId.get(ID_COL)) {
                    curContrib = contribService.findOne(Long.parseLong(field)).orElseThrow(
                            () -> new EntityNotFoundException("Cannot find annotated entity in database."));
                } else {
                    //we don't need to create an annotation if it's a binary label set to false
                    if (!field.equalsIgnoreCase(LABEL_MISSING_VAL)) {
                        String label = header[i].split(LIGHTSIDE_PREDICTION_COL_SUFFIX)[0]; //remove suffix from label if it exists                    
                        AnnotationInstance newAnno = annoService.createTypedAnnotation(label);
                        annoService.saveAnnotationInstance(newAnno);
                        curAnnos.add(newAnno);
                        //if we have any other value than true or false, store this value as a feature
                        if (!field.equalsIgnoreCase(LABEL_ASSIGNED_VAL)) {
                            Feature newFeat = annoService.createFeature(field);
                            annoService.saveFeature(newFeat);
                            annoService.addFeature(newAnno, newFeat);
                        }
                    }
                }
            }
            //wipe old annotations  
            //TODO we might not want to delete ALL annotations
            delete(annoService.findAnnotations(curContrib));

            //add new annotations to the contribution it belongs to 
            for (AnnotationInstance newAnno : curAnnos) {
                annoService.addAnnotation(curContrib, newAnno);
            }
        }
    } catch (IOException e) {
        log.error("Error reading and parsing data from csv");
    }
}

From source file:org.apache.nifi.processors.ParseCSV.ParseCSV.java

/**
 * Reads all CSV records from the given stream, treating the first row as a header,
 * and returns one map (column name -> value) per data row.
 *
 * @param is stream containing CSV data; the first line is interpreted as the header row
 * @return one map per CSV data row, keyed by the header column names
 * @throws IOException if the stream cannot be read or the CSV is malformed
 */
public static List<Map<?, ?>> readObjectsFromCsv(InputStream is) throws IOException {
    // use the first CSV row as the header so each record is keyed by column name
    CsvSchema bootstrap = CsvSchema.emptySchema().withHeader();
    CsvMapper csvMapper = new CsvMapper();
    // readerFor(...) replaces the deprecated ObjectMapper.reader(Class) used previously
    MappingIterator<Map<?, ?>> mappingIterator = csvMapper.readerFor(Map.class).with(bootstrap).readValues(is);

    return mappingIterator.readAll();
}

From source file:com.marklogic.client.test.SPARQLManagerTest.java

/**
 * Parses the given CSV string, using the first row as the header.
 *
 * @param csv CSV content whose first row holds the column names
 * @return iterator over the data rows, each a map of column name -> value
 * @throws IOException if the CSV cannot be parsed
 */
private MappingIterator<Map<String, String>> parseCsv(String csv) throws JsonProcessingException, IOException {
    // readerFor(...) replaces the deprecated ObjectMapper.reader(Class)
    return new CsvMapper().readerFor(Map.class).with(CsvSchema.emptySchema().withHeader()) // use first row as header
            .readValues(csv);
}

From source file:org.gitia.jdataanalysis.JDataAnalysis.java

/**
 * Loads the CSV file at {@code path} into the {@code data} string matrix
 * (and {@code datos}/{@code parser} fields), honoring the {@code isHeader} flag.
 */
private void obtainData() {
    try {
        CSVFormat csvf;
        if (this.isHeader) {
            // first row is a header and is skipped when iterating records
            csvf = CSVFormat.DEFAULT.withHeader();
        } else {
            csvf = CSVFormat.DEFAULT.withIgnoreHeaderCase(isHeader);

            // NOTE(review): this block reads a hard-coded MNIST file and prints it; it appears to be
            // leftover demo code unrelated to {@code path} — confirm whether it can be removed.
            CsvMapper mapper = new CsvMapper();
            // "array wrapping" makes each CSV row bind to a double[]
            mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
            File csvFile = new File("src/main/resources/handwrittennumbers/mnist_train_in.csv"); // or from String, URL etc
            MappingIterator<double[]> it = mapper.readerFor(double[].class).readValues(csvFile);
            int rowCounter = 1;
            List<double[]> listData = it.readAll();
            // renamed from "data": the original local double[][] shadowed the String[][] data field
            double[][] mnist = new double[listData.size()][listData.get(0).length];
            for (int i = 0; i < listData.size(); i++) {
                mnist[i] = listData.get(i);
                System.out.println(rowCounter++ + ":\t");
            }
            SimpleMatrix A = new SimpleMatrix(mnist);
            A.print();
        }

        // Parse once for both branches. Previously the isHeader branch parsed the file here AND
        // above with identical code, doing the whole read and copy twice for no additional effect.
        // NOTE(review): the FileReader/CSVParser are never closed — consider try-with-resources
        // if the parser field is not iterated again elsewhere.
        parser = new CSVParser(new FileReader(path), csvf);
        datos = IteratorUtils.toList(parser.iterator());
        data = new String[datos.size()][datos.get(0).size()];
        for (int i = 0; i < datos.size(); i++) {
            for (int j = 0; j < datos.get(0).size(); j++) {
                data[i][j] = datos.get(i).get(j);
            }
        }
    } catch (IOException ex) {
        Logger.getLogger(JDataAnalysis.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:org.wso2.developerstudio.datamapper.diagram.schemagen.util.SchemaGeneratorForCSV.java

/**
 * Read objects from CSV/* w w  w  . j a  v  a 2  s.  c  om*/
 * 
 * @param content
 * @return
 * @throws IOException
 */
public List<Map<String, String>> readObjectsFromCsv(String content) throws IOException {
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = CsvSchema.emptySchema().withHeader();
    MappingIterator<Map<String, String>> it = mapper.readerFor(Map.class).with(schema).readValues(content);
    return it.readAll();
}