Example usage for org.apache.commons.csv CSVRecord size

List of usage examples for org.apache.commons.csv CSVRecord size

Introduction

On this page you can find example usage for org.apache.commons.csv CSVRecord size.

Prototype

public int size() 

Source Link

Document

Returns the number of values in this record.

Usage

From source file:br.edimarmanica.trinity.check.CheckAttributeNotFound.java

private void readOffSets() {
    /**//from  w  w  w .j a  v a2s.co  m
     * Lendos os Run02.NR_SHARED_PAGES primeiros elementos de cada offset
     */
    File dir = new File(Paths.PATH_TRINITY + site.getPath() + "/offset");

    for (int nrOffset = 0; nrOffset < dir.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return name.endsWith(".csv");
        }
    }).length; nrOffset++) {
        List<Map<String, String>> offset = new ArrayList<>(); //cada arquivo  um offset

        try (Reader in = new FileReader(dir.getAbsoluteFile() + "/result_" + nrOffset + ".csv")) {
            try (CSVParser parser = new CSVParser(in, CSVFormat.EXCEL)) {
                int nrRegistro = 0;
                for (CSVRecord record : parser) {

                    for (int nrRegra = 0; nrRegra < record.size(); nrRegra++) {
                        String value;
                        try {
                            value = formatValue(Preprocessing.filter(record.get(nrRegra)));
                        } catch (InvalidValue ex) {
                            value = "";
                        }

                        if (nrRegistro == 0) {
                            Map<String, String> regra = new HashMap<>();
                            regra.put(record.get(0), value);
                            offset.add(regra);
                        } else {
                            offset.get(nrRegra).put(record.get(0), value);
                        }
                    }
                    nrRegistro++;
                }
            }
            offsets.add(offset);
        } catch (FileNotFoundException ex) {
            Logger.getLogger(CheckAttributeNotFound.class.getName()).log(Level.SEVERE, null, ex);
        } catch (IOException ex) {
            Logger.getLogger(CheckAttributeNotFound.class.getName()).log(Level.SEVERE, null, ex);
        }
    }
}

From source file:com.datascience.hadoop.CsvRecordReader.java

/**
 * Reads the next CSV record into the caller-supplied (key, value) pair.
 * The key is set to the record number and the value to the list of fields.
 *
 * In strict mode an inconsistent record (column count differing from the
 * first record seen) raises CsvParseException; otherwise bad records are
 * skipped and reading continues with the next one.
 *
 * @return true if a record was read, false when input is exhausted
 */
@Override
public boolean next(LongWritable key, ListWritable<Text> value) throws IOException {
    value.clear(); // reuse the caller-supplied holder across calls
    try {
        if (iterator.hasNext()) {
            CSVRecord record = iterator.next();
            position++;
            // Lock in the expected column count from the first record seen.
            colLength = colLength == null ? record.size() : colLength;
            if ((!record.isConsistent() || record.size() != colLength) && strict) {
                String message = String.format("%s: %s", "inconsistent record at position", position);
                throw new CsvParseException(message);
            }

            key.set(record.getRecordNumber());

            for (int i = 0; i < record.size(); i++) {
                String item = record.get(i);
                if (item == null) {
                    value.add(null);
                } else {
                    // Reuse cached Text objects to avoid per-field allocation.
                    Text text = cache[i];
                    if (text == null) {
                        text = new Text();
                        cache[i] = text;
                    }
                    text.set(item);
                    value.add(text);
                }
            }
            //position = record.getCharacterPosition();
            return true;
        }

    } catch (Exception e) {
        LOGGER.warn("failed to parse record at position: " + position);
        if (strict) {
            // Precise rethrow: only IOException/unchecked types can reach here.
            throw e;
        } else {
            // Lenient mode: skip the bad record and recurse to try the next.
            // NOTE(review): unbounded recursion if every remaining record fails.
            return next(key, value);
        }
    }
    return false; // input exhausted
}

From source file:com.github.jferard.pgloaderutils.sniffer.csv.RowSignaturesAnalyzer.java

public char[] getSignature(final CSVRecord record, final int firstRowSize) {
    // Build one type character per expected column; columns the record
    // does not have are marked with '?'.
    final char[] signature = new char[firstRowSize];
    for (int col = 0; col < firstRowSize; col++) {
        signature[col] = (col < record.size()) ? this.getType(record.get(col)) : '?';
    }
    return signature;
}

From source file:ca.nrc.cadc.tap.db.AsciiTableData.java

/**
 * @return The list of formatted objects representing a row of data.
 * @throws IllegalStateException if there is no more data to read
 * @throws IllegalArgumentException if the row width does not match the
 *         expected column count, or a cell cannot be parsed as a number
 */
@Override
public List<Object> next() {
    if (!hasNext()) {
        throw new IllegalStateException("No more data to read.");
    }

    CSVRecord rec = rowIterator.next();
    if (rec.size() != columnNames.size()) {
        throw new IllegalArgumentException(
                "wrong number of columns (" + rec.size() + ") expected " + columnNames.size());
    }
    try {
        List<Object> row = new ArrayList<Object>(columnNames.size());
        for (int i = 0; i < rec.size(); i++) {
            String cell = rec.get(i);
            Format format = columnFormats.get(i); // per-column parser
            row.add(format.parse(cell));
        }
        return row;
    } catch (NumberFormatException ex) {
        // Bug fix: preserve the original exception as the cause instead of
        // discarding it (only the message survived before).
        throw new IllegalArgumentException("invalid number: " + ex.getMessage(), ex);
    }
}

From source file:biz.ganttproject.impex.csv.ResourceRecords.java

/**
 * Processes one CSV row as a human resource definition. Returns false when
 * the base processing or mandatory-field check rejects the record.
 */
@Override
protected boolean doProcess(CSVRecord record) {
    // Guard clauses: base processing first, then mandatory fields.
    if (!super.doProcess(record)) {
        return false;
    }
    if (!hasMandatoryFields(record)) {
        return false;
    }
    assert record.size() > 0;
    // Build the resource from the well-known columns.
    HumanResource resource = resourceManager.newResourceBuilder()
            .withName(getOrNull(record, ResourceFields.NAME.toString()))
            .withID(getOrNull(record, ResourceFields.ID.toString()))
            .withEmail(getOrNull(record, ResourceFields.EMAIL.toString()))
            .withPhone(getOrNull(record, ResourceFields.PHONE.toString()))
            .withRole(getOrNull(record, ResourceFields.ROLE.toString()))
            .withStandardRate(getOrNull(record, ResourceDefaultColumn.STANDARD_RATE.getName())).build();
    // Copy over any custom-field values present in the record.
    for (String customField : getCustomFields()) {
        String value = getOrNull(record, customField);
        if (value == null) {
            continue;
        }
        resource.addCustomProperty(
                resourceManager.getCustomPropertyManager().getCustomPropertyDefinition(customField), value);
    }
    return true;
}

From source file:com.kumarvv.setl.utils.CsvParser.java

/**
 * Parses one CSV record into a column-name -> value map and appends it to
 * the output list. Extra trailing columns in either the configured column
 * list or the record are ignored.
 *
 * @param data   destination list; no-op when null
 * @param record source record; no-op when null
 */
protected void processRecord(final List<Map<String, Object>> data, final CSVRecord record) {
    if (data == null || record == null) {
        return;
    }

    // Hoisted loop invariants: the original called csv.getColumns() on every
    // iteration and checked the record bound with an in-loop break.
    final List<String> columns = csv.getColumns();
    final int limit = Math.min(columns.size(), record.size());

    final Map<String, Object> row = new HashMap<>();
    for (int i = 0; i < limit; i++) {
        row.put(columns.get(i), record.get(i));
    }
    data.add(row);
}

From source file:com.marklogic.contentpump.DelimitedTextInputFormat.java

/**
 * Computes input splits and, when delimited-input splitting is enabled,
 * attaches the header row of each input file to every split generated
 * from that file.
 *
 * @throws IOException on filesystem or header-parsing failure
 */
public List<InputSplit> getSplits(JobContext job) throws IOException {
    boolean delimSplit = isSplitInput(job.getConfiguration());
    // if delimSplit is true, size of each split is determined by
    // Math.max(minSize, Math.min(maxSize, blockSize)) in FileInputFormat
    List<InputSplit> splits = super.getSplits(job);
    if (!delimSplit) {
        return splits;
    }

    if (splits.size() >= SPLIT_COUNT_LIMIT) {
        // if #splits > 1 million, there is enough parallelism,
        // therefore no point to split further
        LOG.warn("Exceeding SPLIT_COUNT_LIMIT, input_split is off:" + SPLIT_COUNT_LIMIT);
        DefaultStringifier.store(job.getConfiguration(), false, ConfigConstants.CONF_SPLIT_INPUT);
        return splits;
    }
    // add header info into splits
    List<InputSplit> populatedSplits = new ArrayList<InputSplit>();
    LOG.info(splits.size() + " DelimitedSplits generated");
    Configuration conf = job.getConfiguration();
    char delimiter = 0;
    ArrayList<Text> hlist = new ArrayList<Text>();
    for (InputSplit file : splits) {
        FileSplit fsplit = ((FileSplit) file);
        Path path = fsplit.getPath();
        FileSystem fs = path.getFileSystem(conf);

        if (fsplit.getStart() == 0) {
            // This split starts the file: parse its header row.
            FSDataInputStream fileIn = fs.open(path);

            String delimStr = conf.get(ConfigConstants.CONF_DELIMITER, ConfigConstants.DEFAULT_DELIMITER);
            if (delimStr.length() == 1) {
                delimiter = delimStr.charAt(0);
            } else {
                // Bug fix: log the offending configured string (delimStr); the
                // original logged the stale 'delimiter' char (possibly '\0' or
                // a value left over from a previous file).
                LOG.error("Incorrect delimiter: " + delimStr + ". Expects single character.");
            }
            String encoding = conf.get(MarkLogicConstants.OUTPUT_CONTENT_ENCODING,
                    MarkLogicConstants.DEFAULT_OUTPUT_CONTENT_ENCODING);
            // try-with-resources guarantees the reader (and the underlying
            // fileIn) is closed even when header parsing throws; the original
            // leaked the stream on any exception before instream.close().
            try (InputStreamReader instream = new InputStreamReader(fileIn, encoding)) {
                CSVParser parser = new CSVParser(instream,
                        CSVParserFormatter.getFormat(delimiter, DelimitedTextReader.encapsulator, true, true));
                Iterator<CSVRecord> it = parser.iterator();

                String[] header = null;
                if (it.hasNext()) {
                    CSVRecord record = (CSVRecord) it.next();
                    Iterator<String> recordIterator = record.iterator();
                    int recordSize = record.size();
                    header = new String[recordSize];
                    for (int i = 0; i < recordSize; i++) {
                        if (recordIterator.hasNext()) {
                            header[i] = (String) recordIterator.next();
                        } else {
                            throw new IOException("Record size doesn't match the real size");
                        }
                    }

                    // Strip a UTF-8 BOM from the first header field if present.
                    EncodingUtil.handleBOMUTF8(header, 0);

                    hlist.clear();
                    for (String s : header) {
                        hlist.add(new Text(s));
                    }
                }
            }
        }

        DelimitedSplit ds = new DelimitedSplit(new TextArrayWritable(hlist.toArray(new Text[hlist.size()])),
                path, fsplit.getStart(), fsplit.getLength(), fsplit.getLocations());
        populatedSplits.add(ds);
    }

    return populatedSplits;
}

From source file:com.siemens.sw360.portal.users.UserCSV.java

/**
 * Populates a user from one CSV record. Expected column layout:
 * 0=given name, 1=last name, 2=email, 3=department, 4=group, 5=gid,
 * 6=male flag, 7=password hash, 8=mail-notification flag (optional).
 */
public UserCSV(CSVRecord record) {
    int col = 0;
    givenname = record.get(col++);
    lastname = record.get(col++);
    email = record.get(col++);
    department = record.get(col++);
    group = record.get(col++);
    gid = record.get(col++);
    isMale = Boolean.parseBoolean(record.get(col++));
    hash = record.get(col++);
    // Optional trailing column: only read when present (col == 8 here).
    if (record.size() > col) {
        wantsMailNotification = Boolean.parseBoolean(record.get(col));
    }
}

From source file:co.cask.hydrator.transforms.CSVParser2.java

/**
 * Decodes, decompresses, and CSV-parses the configured input field, then
 * emits one structured record per CSV row whose field count matches the
 * configured schema.
 *
 * @param in      input record; the configured field must be a string
 * @param emitter sink for successfully parsed rows
 */
@Override
public void transform(StructuredRecord in, Emitter<StructuredRecord> emitter) throws Exception {
    // Field has to be a string to be parsed correctly. For others throw an exception.
    String body = in.get(config.field);

    // If decoder is not NONE, then apply decoder.
    byte[] decodedPayLoad;
    if (!config.decoder.equalsIgnoreCase("NONE")) {
        decodedPayLoad = decodePayLoad(body);
    } else {
        // NOTE(review): uses the platform default charset — confirm UTF-8
        // is not required here.
        decodedPayLoad = body.getBytes();
    }

    // If decompress is not NONE, then apply the matching decompressor.
    // No defensive clone needed: each branch replaces the reference and the
    // original array is never mutated (the clone() here was pure overhead).
    byte[] uncompressedPayLoad = decodedPayLoad;
    if (!config.decompress.equalsIgnoreCase("NONE")) {
        if (config.decompress.equalsIgnoreCase("SNAPPY")) {
            uncompressedPayLoad = Snappy.uncompress(decodedPayLoad);
        } else if (config.decompress.equalsIgnoreCase("GZIP")) {
            uncompressedPayLoad = ungzip(decodedPayLoad);
        } else if (config.decompress.equalsIgnoreCase("ZIP")) {
            uncompressedPayLoad = unzip(decodedPayLoad);
        }
    }

    // Parse the text as CSV and emit it as structured record.
    try {
        CSVParser parser = CSVParser.parse(new String(uncompressedPayLoad), csvFormat);
        List<CSVRecord> records = parser.getRecords();
        for (CSVRecord record : records) {
            if (fields.size() == record.size()) {
                StructuredRecord sRecord = createStructuredRecord(record);
                emitter.emit(sRecord);
            } else {
                // Field-count mismatch: the row is dropped.
                // TODO: write the record to an error dataset.
            }
        }
    } catch (IOException e) {
        // NOTE(review): parse failures are silently swallowed; consider
        // logging or routing to an error dataset instead.
    }
}

From source file:com.github.douglasjunior.simpleCSVEditor.FXMLController.java

/**
 * Reads a CSV file into observable rows for the table view, tracking the
 * widest row seen so the view can size its columns.
 *
 * @param csvFile file to read
 * @return one CSVRow per record in the file
 * @throws IOException if the file cannot be opened or parsed
 */
private ObservableList<CSVRow> readFile(File csvFile) throws IOException {
    ObservableList<CSVRow> rows = FXCollections.observableArrayList();
    // Primitive int: the original Integer autoboxed on every comparison
    // and assignment in the loop.
    int maxColumns = 0;
    try (Reader in = new InputStreamReader(new FileInputStream(csvFile))) {
        CSVParser parse = csvFormat.parse(in);
        for (CSVRecord record : parse.getRecords()) {
            int size = record.size(); // hoisted: used three times per record
            if (maxColumns < size) {
                maxColumns = size;
            }
            CSVRow row = new CSVRow();
            for (int i = 0; i < size; i++) {
                row.getColumns().add(new SimpleStringProperty(record.get(i)));
            }
            rows.add(row);
        }
        this.numbeColumns = maxColumns;
    }
    return rows;
}