Example usage for org.apache.commons.csv CSVRecord size

List of usage examples for org.apache.commons.csv CSVRecord size

Introduction

On this page you can find an example usage for org.apache.commons.csv CSVRecord size.

Prototype

public int size() 

Source Link

Document

Returns the number of values in this record.

Usage

From source file:com.github.jferard.pgloaderutils.loader.CSVRecordCleanerExample.java

@Override
public Iterable<String> cleanRecord(final CSVRecord record) {
    // Lazily iterates over the record's values, rewriting the decimal
    // separator from continental (',') to US ('.') in the numeric columns.
    return new Iterable<String>() {
        @Override
        public Iterator<String> iterator() {
            return new Iterator<String>() {
                // Index of the next value to return.
                private int i = 0;

                @Override
                public boolean hasNext() {
                    return this.i < record.size();
                }

                @Override
                public String next() {
                    String s = record.get(this.i);
                    // Columns 11, 12 and 16 hold numbers; convert the decimal
                    // separator. replace(char, char) is equivalent to the old
                    // replaceAll(",", ".") but avoids compiling a regex on
                    // every call.
                    // NOTE(review): the column indices are hard-coded — confirm
                    // they match the input schema.
                    if (this.i == 11 || this.i == 12 || this.i == 16) {
                        s = s.replace(',', '.'); // from continental to US
                    }
                    this.i++;
                    return s;
                }

                @Override
                public void remove() {
                    // Read-only view of the record.
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}

From source file:net.sourceforge.ganttproject.io.GanttCSVOpen.java

/**
 * Creates tasks from the CSV input: walks the parsed records, splitting them
 * into groups separated by empty lines, where the first non-empty record of a
 * group may be recognized as that group's header.
 *
 * NOTE(review): this relies on a CSVParser API variant with readHeader() and
 * withEmptyLinesIgnored() — confirm the commons-csv version in use. The parser
 * is never closed here; presumably the reader from myInputSupplier is managed
 * by the caller — verify.
 *
 * @return always {@code true} (success); failures surface as exceptions
 * @throws IOException
 *           on parse error or input read-failure
 */
public boolean load() throws IOException {
    CSVParser parser = new CSVParser(myInputSupplier.get(),
            CSVFormat.DEFAULT.withEmptyLinesIgnored(false).withSurroundingSpacesIgnored(true));
    int numGroup = 0;
    RecordGroup currentGroup = null;
    boolean searchHeader = true;
    List<CSVRecord> records = parser.getRecords();
    for (CSVRecord record : records) {
        if (record.size() == 0) {
            // If line is empty then current record group is probably finished.
            // Let's search for the next group header.
            searchHeader = true;
            continue;
        }
        if (searchHeader) {
            // Record is not empty and we're searching for header.
            if (numGroup < myRecordGroups.size() && myRecordGroups.get(numGroup).isHeader(record)) {
                // If next group acknowledges the header, then we give it the turn,
                // otherwise it was just an empty line in the current group
                searchHeader = false;
                currentGroup = myRecordGroups.get(numGroup);
                parser.readHeader(record);
                currentGroup.setHeader(Lists.newArrayList(record.iterator()));
                numGroup++;
                continue;
            }
            // Not a header after all: stay in the current group.
            searchHeader = false;
        }
        // A record before any header would leave currentGroup null — the
        // assert documents that the first non-empty record is expected to
        // be a recognized header.
        assert currentGroup != null;
        currentGroup.process(record);
    }
    // Give each group a chance to finalize its accumulated records.
    for (RecordGroup group : myRecordGroups) {
        group.postProcess();
    }
    // Succeeded
    return true;
}

From source file:com.marklogic.contentpump.DelimitedTextReader.java

/**
 * Reads the next CSV record and returns its values as a String array.
 *
 * @return the record's values, in order
 * @throws IOException if the record yields fewer values than its declared size
 */
protected String[] getLine() throws IOException {
    CSVRecord record = (CSVRecord) parserIterator.next();
    int expectedSize = record.size();
    String[] fields = new String[expectedSize];
    Iterator<String> valueIterator = record.iterator();
    int idx = 0;
    while (idx < expectedSize) {
        if (!valueIterator.hasNext()) {
            // Defensive check: the iterator ended before size() values.
            throw new IOException("Record size doesn't match the real size");
        }
        fields[idx] = valueIterator.next();
        idx++;
    }
    return fields;
}

From source file:br.edimarmanica.trinity.intrasitemapping.auto.MappingController.java

private void reading() {
    /**/*from  w  w  w  .  j a v  a 2 s  . c om*/
     * Lendos os Run02.NR_SHARED_PAGES primeiros elementos de cada offset
     */
    File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/offset");

    for (int nrOffset = 0; nrOffset < dir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.endsWith(".csv");
        }
    }).length; nrOffset++) {
        List<List<String>> offset = new ArrayList<>(); //cada arquivo  um offset

        try (Reader in = new FileReader(dir.getAbsoluteFile() + "/result_" + nrOffset + ".csv")) {
            try (CSVParser parser = new CSVParser(in, CSVFormat.EXCEL)) {
                int nrRegistro = 0;
                for (CSVRecord record : parser) {
                    if (nrRegistro >= Extract.NR_SHARED_PAGES) {
                        break;
                    }

                    for (int nrRegra = 0; nrRegra < record.size(); nrRegra++) {
                        if (nrRegistro == 0) {
                            List<String> regra = new ArrayList<>();
                            try {
                                regra.add(Preprocessing.filter(record.get(nrRegra)));
                            } catch (InvalidValue ex) {
                                regra.add("");
                            }
                            offset.add(regra);
                        } else {
                            try {
                                offset.get(nrRegra).add(Preprocessing.filter(record.get(nrRegra)));
                            } catch (InvalidValue ex) {
                                offset.get(nrRegra).add("");
                            }
                        }
                    }
                    nrRegistro++;
                }
            }
            offsets.add(offset);
        } catch (FileNotFoundException ex) {
            Logger.getLogger(MappingController.class.getName()).log(Level.SEVERE, null, ex);
        } catch (IOException ex) {
            Logger.getLogger(MappingController.class.getName()).log(Level.SEVERE, null, ex);
        }
    }

    /**
     * Mostrando a leitura
     */
    /*for (int i = 1; i < offsets.size(); i++) {
    for (int j = 0; j < 5; j++) {
        System.out.print(offsets.get(i).get(0).get(j) + " - ");
    }
    System.out.println("");
    }*/
}

From source file:com.itemanalysis.jmetrik.file.JmetrikFileImporter.java

/**
 * Creates a header map for the CSV file, imposing naming conventions on the
 * column names: header names are taken verbatim when a header row exists,
 * otherwise synthetic names v1..vN are generated from the first record's
 * width. Every variable starts as DataType.INTEGER at its column position.
 * Any IOException is captured in {@code theException} rather than thrown.
 */
private void setVariableAttributes() {
    int position = 0;

    // BOMInputStream strips a UTF-8 byte-order mark that would otherwise
    // corrupt the first column name. try-with-resources replaces the manual
    // finally-close; a close() failure is still captured in theException.
    try (Reader reader = new InputStreamReader(new BOMInputStream(new FileInputStream(dataFile)), "UTF-8");
            CSVParser parser = new CSVParser(reader, dataFileFormat.withHeader())) {

        if (hasHeader) {
            Map<String, Integer> csvMap = parser.getHeaderMap();
            for (String s : csvMap.keySet()) {
                VariableAttributes variableAttributes = new VariableAttributes(new VariableName(s),
                        new VariableLabel(""), DataType.INTEGER, position);
                variableAttributeMap.put(variableAttributes.getName(), variableAttributes);
                position++;
            }
        } else {
            // No header: derive the column count from the first record.
            Iterator<CSVRecord> iter = parser.iterator();
            // Guard: an empty file would make iter.next() throw
            // NoSuchElementException (which the original did not handle).
            if (iter.hasNext()) {
                CSVRecord csvRecord = iter.next();
                for (int i = 0; i < csvRecord.size(); i++) {
                    VariableAttributes variableAttributes = new VariableAttributes(
                            new VariableName("v" + (i + 1)), new VariableLabel(""), DataType.INTEGER, position);
                    variableAttributeMap.put(variableAttributes.getName(), variableAttributes);
                    position++;
                }
            }
        }

    } catch (IOException ex) {
        theException = ex;
    }
}

From source file:ch.silviowangler.i18n.ResourceBundler.java

/**
 * Parses one CSV record into the per-language property stores: column 0 is
 * the message key, column i (i >= 1) is the translation for bundle i-1.
 * Records with a blank key are skipped with a warning.
 *
 * @param record the CSV record to process
 * @throws UnsupportedEncodingException if value conversion fails
 */
private void processData(CSVRecord record) throws UnsupportedEncodingException {

    final String key = record.get(0);

    // Guard: skip records whose key column is empty or whitespace-only.
    if (key.isEmpty() || key.trim().isEmpty()) {
        LOGGER.warn("Record has no key {}", record);
        return;
    }

    int column = 1;
    while (column < record.size()) {
        String converted = convertIfNecessary(record.get(column));
        this.propertiesStore.get(column - 1).put(key, converted);
        column++;
    }
    LOGGER.info("Successfully parsed {} records", this.propertiesStore.get(0).size());
}

From source file:com.thinkbiganalytics.discovery.parsers.csv.CSVFileSchemaParser.java

/**
 * Builds a file schema from the first rows of the CSV sample: the first row
 * supplies the field names (or synthetic "Col_N" names when there is no
 * header row), and up to 9 following rows supply sample values per field.
 *
 * @param parser the CSV parser positioned at the start of the sample
 * @return the populated schema
 */
private DefaultFileSchema populateSchema(CSVParser parser) {
    DefaultFileSchema fileSchema = new DefaultFileSchema();
    int i = 0; // row index within the sample
    ArrayList<Field> fields = new ArrayList<>();
    for (CSVRecord record : parser) {
        // Only the first 10 rows are sampled.
        if (i > 9) {
            break;
        }
        int size = record.size();
        for (int j = 0; j < size; j++) {
            if (i == 0) {
                // First row defines the fields.
                DefaultField field = new DefaultField();
                if (headerRow) {
                    field.setName(record.get(j));
                } else {
                    field.setName("Col_" + (j + 1));
                }
                fields.add(field);
            } else {
                try {
                    DefaultField field = (DefaultField) fields.get(j);
                    field.getSampleValues().add(StringUtils.defaultString(record.get(j), ""));
                } catch (IndexOutOfBoundsException e) {
                    // Row is wider than the first row (ragged/sparse data).
                    // BUG FIX: the message used "[?]" placeholders, which a
                    // parameterized (SLF4J-style) logger never substitutes —
                    // "{}" is the placeholder form, so the row/field numbers
                    // actually appear in the log.
                    LOG.warn("Sample file has potential sparse column problem at row {} field {}", i + 1,
                            j + 1);
                }
            }
        }
        i++;
    }
    fileSchema.setFields(fields);
    return fileSchema;
}

From source file:br.edimarmanica.trinity.intrasitemapping.auto.MergeOffsets.java

/**
 * Reads one offset file (result_&lt;indexOffset&gt;.csv), filters each value,
 * and hands the resulting rows to print(). For every offset after the first,
 * the initial Extract.NR_SHARED_PAGES records are skipped because they were
 * already extracted with the previous offset (avoids duplicates).
 *
 * @param indexOffset index of the offset file to process
 */
private void executeOffset(int indexOffset) {
    File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/offset");
    try (Reader in = new FileReader(dir.getAbsoluteFile() + "/result_" + indexOffset + ".csv")) {
        List<List<String>> lines = new ArrayList<>();
        try (CSVParser parser = new CSVParser(in, CSVFormat.EXCEL)) {

            int recordIndex = 0;
            for (CSVRecord record : parser) {
                boolean alreadyExtracted = indexOffset != 0 && recordIndex < Extract.NR_SHARED_PAGES;
                if (!alreadyExtracted) {
                    List<String> line = new ArrayList<>();
                    for (int ruleIndex = 0; ruleIndex < record.size(); ruleIndex++) {
                        String value;
                        try {
                            value = Preprocessing.filter(record.get(ruleIndex));
                        } catch (InvalidValue ex) {
                            value = ""; // invalid values become empty cells
                        }
                        line.add(value);
                    }
                    lines.add(line);
                }
                recordIndex++;
            }

            print(indexOffset, lines);
        }
    } catch (FileNotFoundException ex) {
        Logger.getLogger(MergeOffsets.class.getName()).log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        Logger.getLogger(MergeOffsets.class.getName()).log(Level.SEVERE, null, ex);
    }

}

From source file:edu.clemson.lph.utils.CSVParserWrapper.java

/**
 * Copies every record of the given parser into this wrapper's row list and
 * closes the parser. A null parser leaves the wrapper empty.
 *
 * @param pIn the parser to drain; closed on return
 * @throws IOException if reading the records fails
 */
public CSVParserWrapper(CSVParser pIn) throws IOException {
    if (pIn == null) {
        return;
    }
    try {
        for (CSVRecord record : pIn.getRecords()) {
            List<String> row = new ArrayList<String>();
            // CSVRecord iterates its values in column order.
            for (String field : record) {
                row.add(field);
            }
            aRows.add(row);
        }
        iRows = aRows.size();
        iCurrent = 1; // cursor starts past the header row
    } finally {
        // Always release the parser, even if getRecords() throws.
        pIn.close();
    }
}

From source file:convertCSV.ConvertCSV.java

/**
 * @param fichier_/*from  w w  w.  j  av a  2  s .  c  o m*/
 * @throws java.io.IOException
 */
public void importer(String fichier_) throws IOException {
    // TODO code application logic here

    float lat, lon, ele, secjour;
    int bpm;
    List<MonPoint> points = new ArrayList<>();
    Reader in = null;
    CSVParser parser;
    List<CSVRecord> list;
    GPXWriter monGPX = new GPXWriter();

    // lecture du CSV
    try {

        System.out.println("Lecture de " + fichier_);
        in = new FileReader(fichier_);

    } catch (FileNotFoundException ex) {
        Logger.getLogger(ConvertCSV.class.getName()).log(Level.SEVERE, null, ex);
    }

    parser = new CSVParser(in, CSVFormat.EXCEL);
    list = parser.getRecords();
    list.remove(0);

    // remplissage de la liste de point GPX
    if (in != null) {
        for (CSVRecord elem : list) {

            try {

                // on recupere les donnees dans le CSV
                lat = Float.parseFloat(elem.get(0));
                lon = Float.parseFloat(elem.get(1));
                ele = Float.parseFloat(elem.get(2));
                secjour = Float.parseFloat(elem.get(3));
                if (elem.size() > 4) {
                    bpm = Integer.parseInt(elem.get(4));
                    points.add(new MonPoint(lat, lon, ele, secjour, bpm));
                } else {
                    points.add(new MonPoint(lat, lon, ele, secjour));
                }

            } catch (NumberFormatException ex) {
                System.out.println(elem.toString());
            }

        }

        // ecriture du GPX
        monGPX.writePath("C:\\Users\\vincent\\Desktop\\today.gpx", "Training", points);
        in.close();

    }

    System.exit(0);
}