Example usage for com.fasterxml.jackson.dataformat.csv CsvMapper enable

List of usage examples for com.fasterxml.jackson.dataformat.csv CsvMapper enable

Introduction

On this page you can find example usage for com.fasterxml.jackson.dataformat.csv CsvMapper enable.

Prototype

public CsvMapper enable(CsvParser.Feature f) 
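
A minimal sketch of the typical call pattern, assuming an input file named data.csv and String[] as the target row type (neither is part of the prototype itself):

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvParser;

import java.io.File;
import java.io.IOException;

public class CsvMapperEnableSketch {
    public static void main(String[] args) throws IOException {
        CsvMapper mapper = new CsvMapper();
        // enable() returns the mapper itself, so feature calls can be chained
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
        // With WRAP_AS_ARRAY each CSV row is exposed as an array, so String[] works as the target type
        MappingIterator<String[]> it = mapper.readerFor(String[].class)
                .readValues(new File("data.csv")); // assumed input file
        while (it.hasNext()) {
            String[] row = it.next();
            System.out.println(String.join(" | ", row));
        }
    }
}

As the examples below show, enabling CsvParser.Feature.WRAP_AS_ARRAY is the usual first step before reading untyped rows into arrays.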

Usage

From source file:db.migration.util.DbUtil.java

public static Iterator<Object[]> readCsv(URL csvUrl) throws Exception {
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    return mapper.reader(String[].class).readValues(csvUrl);
}
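
Because WRAP_AS_ARRAY makes the parser report each CSV line as an array, the reader can bind rows straight to String[]. A hypothetical caller might consume the returned iterator like this (the classpath resource name is an assumption, and java.util.Iterator and java.util.Arrays imports are needed):

// Hypothetical caller of the helper above; the resource path is an assumption
Iterator<Object[]> rows = DbUtil.readCsv(DbUtil.class.getResource("/db/migration/data.csv"));
while (rows.hasNext()) {
    System.out.println(Arrays.toString(rows.next()));
}

Note that readCsv declares throws Exception, so the caller must handle or propagate it.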

From source file:com.couchbase.devex.CSVConfig.java

@Override
public Observable<Document> startImport() {
    FileInputStream csvFile;
    try {
        csvFile = new FileInputStream(getCsvFilePath());
        CsvMapper mapper = new CsvMapper();
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
        CsvSchema csvSchema = CsvSchema.emptySchema().withColumnSeparator(getColumnSeparator())
                .withQuoteChar(getQuoteChar());
        ObjectReader reader = mapper.reader(String[].class);
        MappingIterator<String[]> it = reader.with(csvSchema).readValues(csvFile);
        if (!getSkipFirstLineForNames()) {
            String[] firstline = it.next();
            updateColumnNames(firstline);
        }
        return Observable.from(new Iterable<String[]>() {
            @Override
            public Iterator<String[]> iterator() {
                return it;
            }
        }).flatMap(line -> createNode(line));
    } catch (FileNotFoundException e) {
        return Observable.error(e);
    } catch (IOException e) {
        return Observable.error(e);
    }
}

From source file:edu.cmu.cs.lti.discoursedb.annotation.lightside.io.LightSideService.java

/**
 * Imports a file that was previously generated by exportDataForAnnotation() and then annotated by LightSide classifiers
 *
 * @param inputFilePath path to the file that should be imported
 */
public void importAnnotatedData(String inputFilePath) {
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
    File csvFile = new File(inputFilePath);
    try {
        MappingIterator<String[]> it = mapper.readerFor(String[].class).readValues(csvFile);

        //process header
        String[] header = it.next();
        Map<String, Integer> headerId = new HashMap<>();
        for (int i = 0; i < header.length; i++) {
            headerId.put(header[i], i);
        }

        //process data
        while (it.hasNext()) {
            String[] row = it.next();
            Contribution curContrib = null;
            List<AnnotationInstance> curAnnos = new ArrayList<>();
            for (int i = 0; i < row.length; i++) {
                String field = row[i];
                if (i == headerId.get(TEXT_COL)) {
                    //we don't need the text column
                } else if (i == headerId.get(ID_COL)) {
                    curContrib = contribService.findOne(Long.parseLong(field)).orElseThrow(
                            () -> new EntityNotFoundException("Cannot find annotated entity in database."));
                } else {
                    //we don't need to create an annotation if it's a binary label set to false
                    if (!field.equalsIgnoreCase(LABEL_MISSING_VAL)) {
                        String label = header[i].split(LIGHTSIDE_PREDICTION_COL_SUFFIX)[0]; //remove suffix from label if it exists                    
                        AnnotationInstance newAnno = annoService.createTypedAnnotation(label);
                        annoService.saveAnnotationInstance(newAnno);
                        curAnnos.add(newAnno);
                        //if we have any other value than true or false, store this value as a feature
                        if (!field.equalsIgnoreCase(LABEL_ASSIGNED_VAL)) {
                            Feature newFeat = annoService.createFeature(field);
                            annoService.saveFeature(newFeat);
                            annoService.addFeature(newAnno, newFeat);
                        }
                    }
                }
            }
            //wipe old annotations  
            //TODO we might not want to delete ALL annotations
            delete(annoService.findAnnotations(curContrib));

            //add new annotations to the contribution it belongs to 
            for (AnnotationInstance newAnno : curAnnos) {
                annoService.addAnnotation(curContrib, newAnno);
            }
        }
    } catch (IOException e) {
        log.error("Error reading and parsing data from csv");
    }
}

From source file:nl.esciencecenter.ptk.csv.CSVData.java

public void parseText(String csvText) throws IOException {
    // Extended CSV !
    // Pass I: remove comments including the ending newline!
    Pattern pat = Pattern.compile("^#.*\n", Pattern.MULTILINE);
    csvText = pat.matcher(csvText).replaceAll("");

    // todo: check how jackson can parse alternative field separators;
    if (fieldSeparators != null) {
        // csvText=csvText.replaceAll(",","_");

        for (String sep : fieldSeparators) {
            // lazy replace
            csvText = csvText.replaceAll(sep, ",");
        }
    }

    // Not needed: Pass II: remove empty lines as a result of the
    // pat=Pattern.compile("\n\n",Pattern.MULTILINE);
    // newTxt=pat.matcher(newTxt).replaceAll("");

    // ObjectMapper mapper=new ObjectMapper();
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);

    MappingIterator<Object[]> it = mapper.reader(Object[].class).readValues(csvText);

    if (it.hasNext() == false) {
        throw new IOException("Empty text or csv text contains no headers!");
    }

    // read header:
    Object headers[] = it.next();

    StringList list = new StringList();
    for (int i = 0; i < headers.length; i++) {
        list.add(headers[i].toString());
    }

    logger.debugPrintf("Headers=%s\n", list.toString("<>"));
    headerList = list;

    data = new ArrayList<String[]>();

    // check header values.
    while (it.hasNext()) {
        Object line[] = it.next();
        String row[] = new String[line.length];

        for (int j = 0; j < line.length; j++) {
            Object value = line[j];
            if (value != null) {
                row[j] = value.toString();
            }
        }
        data.add(row);
    }

    logger.debugPrintf("Read %d number of rows\n", data.size());
}

From source file:net.flutterflies.fwapaderp.game.TeamManager.java

/**
 * Converts a .csv spreadsheet template into UHCTeam objects
 *
 * @param teamsList Existing list of teams; its contents are cleared and overwritten
 * @return A list of all UHCTeams
 */
public ArrayList<UHCTeam> createTeamsFromCSV(ArrayList<UHCTeam> teamsList) {
    ArrayList<String[]> rows = new ArrayList<>();
    File teamsFile = new File(plugin.getDataFolder(), "teams.csv");
    CsvMapper mapper = new CsvMapper();

    //Clear any existing teams on the team list
    teamsList.clear();

    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);

    //Try to load values from teams.csv
    try {
        MappingIterator<String[]> iterator = mapper.readerFor(String[].class).readValues(teamsFile);
        while (iterator.hasNext()) {
            rows.add(rows.size(), iterator.next());
        }
    } catch (IOException e) {
        plugin.getLogger().log(Level.SEVERE, "Could not find the file teams.csv! Please either supply "
                + "a teams.csv file or disable usePreMadeTeams in the plugin's config file.");
        System.exit(0);
    }

    //For each row in the csv file create a new team
    for (int i = 1; i < rows.size(); i++) {
        String[] team = rows.get(i);
        List<String> teamPlayerList = new ArrayList<>();
        for (int j = 2; j < team.length; j++) {
            if (!team[j].equals("")) {
                teamPlayerList.add(teamPlayerList.size(), team[j]);
            }
        }
        teamsList.add(teamsList.size(),
                new UHCTeam(team[0], team[1].toUpperCase().replace(' ', '_'), teamPlayerList));
    }

    //Write Teams to a yaml file
    for (int i = 0; i < teamsList.size(); i++) {
        //Get the team
        UHCTeam team = teamsList.get(i);

        //Write the team name
        plugin.getTeamConfig().set("teams.team" + (i + 1) + ".name", team.getTeamName(false));
        //Write the team's color
        plugin.getTeamConfig().set("teams.team" + (i + 1) + ".color", team.getTeamColor());
        //Write all the players in the team
        for (int j = 0; j < team.getTeamSize(); j++) {
            plugin.getTeamConfig().set("teams.team" + (i + 1) + ".players.player" + (j + 1),
                    team.getPlayers().get(j));
        }
    }
    plugin.saveTeamsConfig();

    return teamsList;
}

From source file:org.gitia.jdataanalysis.JDataAnalysis.java

private void obtainData() {
    try {
        CSVFormat csvf;
        if (this.isHeader) {
            csvf = CSVFormat.DEFAULT.withHeader();

            parser = new CSVParser(new FileReader(path), csvf);
            datos = IteratorUtils.toList(parser.iterator());
            data = new String[datos.size()][datos.get(0).size()];
            for (int i = 0; i < datos.size(); i++) {
                for (int j = 0; j < datos.get(0).size(); j++) {
                    data[i][j] = datos.get(i).get(j);
                }
            }

        } else {
            csvf = CSVFormat.DEFAULT.withIgnoreHeaderCase(isHeader);

            CsvMapper mapper = new CsvMapper();
            // important: we need "array wrapping" (see next section) here:
            mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
            File csvFile = new File("src/main/resources/handwrittennumbers/mnist_train_in.csv"); // or from String, URL etc
            MappingIterator<double[]> it = mapper.readerFor(double[].class).readValues(csvFile);
            int a = 1;
            List<double[]> listData = it.readAll();
            double[][] data = new double[listData.size()][listData.get(0).length];
            for (int i = 0; i < listData.size(); i++) {
                data[i] = listData.get(i);
                System.out.println(a++ + ":\t");
            }
            SimpleMatrix A = new SimpleMatrix(data);
            A.print();

        }
        parser = new CSVParser(new FileReader(path), csvf);
        datos = IteratorUtils.toList(parser.iterator());
        data = new String[datos.size()][datos.get(0).size()];
        for (int i = 0; i < datos.size(); i++) {
            for (int j = 0; j < datos.get(0).size(); j++) {
                data[i][j] = datos.get(i).get(j);
            }
        }
    } catch (IOException ex) {
        Logger.getLogger(JDataAnalysis.class.getName()).log(Level.SEVERE, null, ex);
    }
}