Example usage for org.apache.commons.csv CSVParser iterator

List of usage examples for org.apache.commons.csv CSVParser iterator

Introduction

On this page you can find example usage for org.apache.commons.csv CSVParser.iterator().

Prototype

@Override
public Iterator<CSVRecord> iterator() 

Document

Returns an iterator on the records.
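
Before the larger examples below, here is a minimal sketch of the call in isolation: open a reader, ask the parser for its record iterator, and walk the records one by one. The file name data.csv and the column index are placeholders, not taken from the examples.

import java.io.FileReader;
import java.io.Reader;
import java.util.Iterator;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CSVIteratorSketch {
    public static void main(String[] args) throws Exception {
        // "data.csv" is a placeholder path
        try (Reader reader = new FileReader("data.csv");
                CSVParser parser = new CSVParser(reader, CSVFormat.DEFAULT)) {
            Iterator<CSVRecord> it = parser.iterator();
            while (it.hasNext()) {
                CSVRecord record = it.next();
                System.out.println(record.get(0)); // value of the first column
            }
        }
    }
}

The iterator reads from the underlying stream, so a parser is effectively single-pass: the examples below that call next() once to capture or skip a header row then keep using the same iterator (or the parser itself) for the remaining records.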

Usage

From source file:assignment.CSVFileReader.java

@Override
public List<Map<String, String>> readFile(String filePath) {
    Reader reader;
    List<Map<String, String>> rows = new ArrayList<Map<String, String>>();
    try {
        reader = new BufferedReader(new InputStreamReader(new FileInputStream(filePath), "utf-8"));
        CSVParser csvParser = new CSVParser(reader, CSVFormat.DEFAULT);

        Iterator<CSVRecord> csvRecord = csvParser.iterator();
        // the first record is treated as the header row
        CSVRecord headers = csvRecord.next();

        for (CSVRecord row : csvParser) {
            Map<String, String> item = new HashMap<String, String>();

            int colNr = 0;
            for (String header : headers) {
                String r = "";
                try {
                    r = row.get(colNr);
                } catch (Exception ex) {
                    // row has fewer columns than the header; leave the value empty
                }
                item.put(header, r);
                colNr++;
            }
            rows.add(item);
        }
    } catch (Exception ex) {
        // read/parse failure; return whatever rows were collected so far
    }
    return rows;
}

From source file:com.github.jferard.pgloaderutils.sniffer.csd.CSDSchemaValidator.java

/**
 * @param schema the schema to be tested
 * @param parser the CSVRecord provider
 * @return a validation result
 */
public CSDValidationResult<F> validate(CSDSchema<F> schema, CSVParser parser) {
    this.result = new CSDValidationResult<F>(logger, schema);
    Iterator<CSVRecord> it = parser.iterator();
    if (it.hasNext()) {
        CSVRecord firstRecord = it.next();
        this.validateHeaderOrFirstRecord(schema, firstRecord);
        int i = 1;
        while (it.hasNext())
            this.validatorHelper.validateRecord(result, schema, it.next(), i++);

    } else {
        result.noLine();
    }
    return this.result;
}

From source file:com.miovision.oss.awsbillingtools.parser.DetailedLineItemParser.java

@Override
public Stream<DetailedLineItem> parse(Reader reader) throws IOException {
    final CSVParser csvParser = CSV_FORMAT.parse(reader);
    try {
        final Iterator<CSVRecord> iterator = csvParser.iterator();
        final List<String> tags = readTags(iterator);

        return StreamSupport.stream(Spliterators.spliteratorUnknownSize(iterator, Spliterator.ORDERED), false)
                .map(csvRecord -> createDetailedLineItem(csvRecord, tags)).onClose(() -> {
                    try {
                        csvParser.close();
                    } catch (IOException e) {
                        throw new RuntimeException(e);
                    }
                });
    } catch (Exception e) {
        csvParser.close();
        throw e;
    }
}

From source file:com.nordpos.device.csv.FileCSVInputOutput.java

@Override
public void startDownloadProduct() throws DeviceInputOutputException {
    try {
        CSVParser parser = CSVParser.parse(inFile, StandardCharsets.UTF_8, format);
        recIterator = parser.iterator();
    } catch (IOException ex) {
        Logger.getLogger(FileCSVInputOutput.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:com.github.jferard.pgloaderutils.sniffer.csd.CSDSchemaSniffer.java

/**
 * @param schemaPattern the pattern to be tested
 * @param parser        the CSVRecord provider
 * @param maxLine       the maximum number of lines
 * @return the real CSDSchema, or null if the pattern does not match.
 */
public CSDSchema<F> sniff(CSDSchemaPattern<F> schemaPattern, CSVParser parser, int maxLine) {
    this.result = new CSDValidationResult<F>(logger, schemaPattern);
    Iterator<CSVRecord> it = parser.iterator();

    if (!it.hasNext()) {
        result.noLine();
        return null;
    }

    CSVRecord firstRecord = it.next();
    if (!this.validateHeaderOrFirstRecord(result, schemaPattern, firstRecord))
        return null;

    int i = 1;
    while (it.hasNext() && i < maxLine)
        this.validatorHelper.validateRecord(result, schemaPattern, it.next(), i++);

    if (this.result.errorCount() > maxLine)
        return null;

    return schemaPattern.newSchema(factory, firstRecord);
}

From source file:io.mindmaps.migration.csv.CSVDataMigrator.java

/**
 * @param entityName name to be given to the migrated entity
 * @param parser CSVParser of the file to migrate
 */
public CSVDataMigrator configure(String entityName, CSVParser parser) {
    this.entityName = entityName;
    this.records = parser.iterator();
    this.headers = parser.getHeaderMap();
    return this;
}

From source file:com.github.jferard.pgloaderutils.sniffer.csv.CSVOptionalHeaderSniffer.java

@Override
public void sniff(final InputStream inputStream, final int size) throws IOException {
    final Reader streamReader = new InputStreamReader(inputStream, this.charset);

    final CSVParser parser = new CSVParser(streamReader, this.csvFormat);
    try {
        final Iterator<CSVRecord> iterator = parser.iterator();

        if (iterator.hasNext()) {
            final CSVRecord firstRowRecord = iterator.next();
            final int firstRowSize = firstRowRecord.size();

            final char[] firstRowSignature = this.rowSignaturesAnalyzer.getSignature(firstRowRecord,
                    firstRowSize);

            if (this.containsAtLeastOneOnlyDigitsValue(firstRowSignature)) {
                this.header = null;
            } else {
                final char[] remainingRowsSignature = this.rowSignaturesAnalyzer
                        .getRemainingRowsSignature(iterator, firstRowSize);
                if (this.containsAtLeastOneColumnWithLetterHeaderAndDigitValues(firstRowSignature,
                        remainingRowsSignature, firstRowSize)) {
                    // copy firstRow in header
                    for (final String s : firstRowRecord)
                        this.header.add(s);
                }
            }
        } else
            this.header = null;
    } finally {
        parser.close();
    }
}

From source file:com.ge.research.semtk.load.dataset.CSVDataset.java

/**
 * Initialize
 * @param path the CSV file path
 * @param headers the headers needed for this dataset
 * @throws Exception
 */
private void initialize(String path, String[] headers) throws Exception {
    this.csvPath = path;
    CSVParser parser = getParser(new FileReader(path));
    this.recordIterator = parser.iterator();
    this.headers = headers;

    // confirm that headers passed in are available in the CSVParser (case-insensitive)
    boolean found;
    for (String header : headers) {
        found = false;
        Set<String> parserHeaders = parser.getHeaderMap().keySet();
        for (String parserHeader : parserHeaders) {
            if (parserHeader.equalsIgnoreCase(header)) {
                found = true;
                break;
            }
        }
        if (!found) {
            throw new Exception("Header '" + header + "' not found in CSV file");
        }
    }

    // gather all the headers the parser found
    Set<String> parserHeaders = parser.getHeaderMap().keySet();
}

From source file:com.ge.research.semtk.load.dataset.CSVDataset.java

/**
 * Constructor that takes a string (either a file path or file content)
 *
 * @param filePathOrContent file path, or file content
 * @param isFileContent true for file contents, false for file path
 */
public CSVDataset(String filePathOrContent, boolean isFileContent) throws Exception {

    if (isFileContent) {
        this.csvString = filePathOrContent;
    } else {
        this.csvString = FileUtils.readFileToString(new File(filePathOrContent));
    }
    CSVParser parser = getParser(new StringReader(this.csvString));
    this.recordIterator = parser.iterator();

    // get and set the header info
    Map<String, Integer> headerMap = parser.getHeaderMap();
    this.headers = new String[headerMap.size()];

    // TODO: this test was causing other problems so it was removed.
    if (false) {
        throw new Exception("Duplicate or empty column headers on CSV file");
    }

    for (String s : headerMap.keySet()) {
        int location = headerMap.get(s);
        this.headers[location] = s;
    }
}

From source file:com.nuevebit.miroculus.mrna.cli.DatabasePopulator.java

private void parseCSV(String csv) throws IOException {
    CSVParser csvParser = CSVParser.parse(csv, CSVFormat.EXCEL);

    Iterator<CSVRecord> records = csvParser.iterator();
    // ignore headers
    records.next();

    // read line by line
    while (records.hasNext()) {
        CSVRecord record = records.next();

        // normalize the name (remove *)
        String miRNAName = MiRNA.normalizeName(record.get(0));
        MiRNA miRNA = miRNARepository.findByName(miRNAName);

        if (miRNA == null) { // first time this miRNA is added
            miRNA = miRNARepository.save(new MiRNA(miRNAName));
        }

        String diseaseName = record.get(1).toLowerCase().trim();
        Disease disease = diseaseRepository.findByName(diseaseName);

        if (disease == null) {
            disease = diseaseRepository.save(new Disease(diseaseName));
            disease.setMortalityRate(0d);
        }

        String authorName = record.get(4).trim();
        Author author = authorRepository.findByName(authorName);

        if (author == null) {
            author = authorRepository.save(new Author(authorName));
        }

        String publicationTitle = record.get(6).trim();
        String publicationJournal = record.get(5).trim();

        Publication pub = publicationRepository.findByNameAndJournal(publicationTitle, publicationJournal);

        if (pub == null) {
            pub = new Publication(publicationTitle, publicationJournal);
            pub.setAuthor(author);
            String year = record.get(7);
            pub.setYear(Integer.valueOf(year));
            pub.setDescription(record.get(9).trim());

            pub = publicationRepository.save(pub);

        }

        String methodName = record.get(8).trim();
        DiscoveryMethod method = discoveryMethodRepository.findByName(methodName);

        if (method == null) {
            method = discoveryMethodRepository.save(new DiscoveryMethod(methodName));
        }

        CorrelationDiscovery correlation = new CorrelationDiscovery(miRNA, disease,
                Integer.valueOf(record.get(2)));

        correlation.setPublication(pub);
        correlation.setMethod(method);

        // save the found correlation
        correlationDiscoveryRepository.save(correlation);
    }
}