Example usage for org.apache.commons.csv CSVRecord size

Introduction

This page collects example usages of the org.apache.commons.csv CSVRecord method size(), drawn from open source projects.

Prototype

public int size() 

Document

Returns the number of values in this record.

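Example

A minimal sketch (invented for illustration, not taken from the projects below; the input string and class name are assumptions) showing what size() reports per record:

import java.io.StringReader;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CsvRecordSizeExample {
    public static void main(String[] args) throws Exception {
        // Two rows with different numbers of values: "a,b,c" then "1,2"
        try (CSVParser parser = new CSVParser(new StringReader("a,b,c\n1,2\n"), CSVFormat.DEFAULT)) {
            for (CSVRecord record : parser) {
                // size() returns the number of values in this record
                System.out.println("record " + record.getRecordNumber()
                        + " has " + record.size() + " values");
            }
        }
    }
}

This prints 3 for the first record and 2 for the second, so size() is a convenient check that a row carries the expected number of columns.
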
Usage

From source file:org.apache.phoenix.end2end.CSVCommonsLoaderIT.java
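In this Phoenix integration test, record.size() bounds a column-by-column comparison between each CSV record and the matching row of a Phoenix ResultSet; the time and date columns are compared separately.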

@Test
public void testAllDatatypes() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS " + DATATYPE_TABLE
                + " (CKEY VARCHAR NOT NULL PRIMARY KEY,"
                + "  CVARCHAR VARCHAR, CCHAR CHAR(10), CINTEGER INTEGER, CDECIMAL DECIMAL(31,10), CUNSIGNED_INT UNSIGNED_INT, CBOOLEAN BOOLEAN, CBIGINT BIGINT, CUNSIGNED_LONG UNSIGNED_LONG, CTIME TIME, CDATE DATE);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, DATATYPE_TABLE, Collections.<String>emptyList(),
                true);
        csvUtil.upsert(new StringReader(DATATYPES_CSV_VALUES));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement(
                "SELECT CKEY, CVARCHAR, CCHAR, CINTEGER, CDECIMAL, CUNSIGNED_INT, CBOOLEAN, CBIGINT, CUNSIGNED_LONG, CTIME, CDATE FROM "
                        + DATATYPE_TABLE);
        ResultSet phoenixResultSet = statement.executeQuery();
        parser = new CSVParser(new StringReader(DATATYPES_CSV_VALUES), csvUtil.getFormat());

        for (CSVRecord record : parser) {
            assertTrue(phoenixResultSet.next());
            int i = 0;
            int size = record.size();
            for (String value : record) {
                // stop before the time and date columns; they are compared separately below
                if (i >= size - 2) {
                    break;
                }
                assertEquals(value, phoenixResultSet.getObject(i + 1).toString().toUpperCase());
                i++;
            }
            // special case for matching date, time values
            String timeFieldValue = record.get(9);
            assertEquals(timeFieldValue.isEmpty() ? null : DateUtil.parseTime(timeFieldValue),
                    phoenixResultSet.getTime("CTIME"));

            String dateField = record.get(10);
            assertEquals(dateField.isEmpty() ? null : DateUtil.parseDate(dateField),
                    phoenixResultSet.getDate("CDATE"));
        }

        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null)
            parser.close();
        if (conn != null)
            conn.close();
    }
}

From source file:org.apache.phoenix.end2end.CSVCommonsLoaderTest.java
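An older variant of the same test, covering one fewer column; record.size() again limits the generic comparison so that the time and date columns can be matched with Phoenix-specific parsing.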

@Test
public void testAllDatatypes() throws Exception {
    CSVParser parser = null;
    PhoenixConnection conn = null;
    try {
        // Create table
        String statements = "CREATE TABLE IF NOT EXISTS " + DATATYPE_TABLE
                + " (CKEY VARCHAR NOT NULL PRIMARY KEY,"
                + "  CVARCHAR VARCHAR, CINTEGER INTEGER, CDECIMAL DECIMAL(31,10), CUNSIGNED_INT UNSIGNED_INT, CBOOLEAN BOOLEAN, CBIGINT BIGINT, CUNSIGNED_LONG UNSIGNED_LONG, CTIME TIME, CDATE DATE);";
        conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
        PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);

        // Upsert CSV file
        CSVCommonsLoader csvUtil = new CSVCommonsLoader(conn, DATATYPE_TABLE, Collections.<String>emptyList(),
                true);
        csvUtil.upsert(new StringReader(DATATYPES_CSV_VALUES));

        // Compare Phoenix ResultSet with CSV file content
        PreparedStatement statement = conn.prepareStatement(
                "SELECT CKEY, CVARCHAR, CINTEGER, CDECIMAL, CUNSIGNED_INT, CBOOLEAN, CBIGINT, CUNSIGNED_LONG, CTIME, CDATE FROM "
                        + DATATYPE_TABLE);
        ResultSet phoenixResultSet = statement.executeQuery();
        parser = new CSVParser(new StringReader(DATATYPES_CSV_VALUES), csvUtil.getFormat());

        for (CSVRecord record : parser) {
            assertTrue(phoenixResultSet.next());
            int i = 0;
            int size = record.size();
            for (String value : record) {
                // stop before the time and date columns; they are compared separately below
                if (i >= size - 2) {
                    break;
                }
                assertEquals(value, phoenixResultSet.getObject(i + 1).toString().toUpperCase());
                i++;
            }
            // special case for matching date, time values
            assertEquals(DateUtil.parseTime(record.get(8)), phoenixResultSet.getTime("CTIME"));
            assertEquals(DateUtil.parseDate(record.get(9)), phoenixResultSet.getDate("CDATE"));
        }

        assertFalse(phoenixResultSet.next());
    } finally {
        if (parser != null)
            parser.close();
        if (conn != null)
            conn.close();
    }
}

From source file:org.apache.phoenix.flume.serializer.CsvEventSerializer.java
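This Flume event serializer rejects any payload whose csvRecord.size() differs from the number of configured column names, then uses the size to map each value to its column before the upsert.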

@Override
public void upsertEvents(List<Event> events) throws SQLException {
    Preconditions.checkNotNull(events);
    Preconditions.checkNotNull(connection);
    Preconditions.checkNotNull(this.upsertStatement);

    boolean wasAutoCommit = connection.getAutoCommit();
    connection.setAutoCommit(false);
    try (PreparedStatement colUpsert = connection.prepareStatement(upsertStatement)) {
        String value = null;
        Integer sqlType = null;
        for (Event event : events) {
            byte[] payloadBytes = event.getBody();
            if (payloadBytes == null || payloadBytes.length == 0) {
                continue;
            }
            String payload = new String(payloadBytes);
            CSVRecord csvRecord = csvLineParser.parse(payload);
            if (colNames.size() != csvRecord.size()) {
                logger.debug("payload data {} doesn't match the fields mapping {} ", payload, colNames);
                continue;
            }
            Map<String, String> data = new HashMap<String, String>();
            for (int i = 0; i < csvRecord.size(); i++) {
                data.put(colNames.get(i), csvRecord.get(i));
            }
            Collection<String> values = data.values();
            if (values.contains(null)) {
                logger.debug("payload data {} doesn't match the fields mapping {} ", payload, colNames);
                continue;
            }

            int index = 1;
            int offset = 0;
            for (int i = 0; i < colNames.size(); i++, offset++) {
                if (columnMetadata[offset] == null) {
                    continue;
                }
                String colName = colNames.get(i);
                value = data.get(colName);
                sqlType = columnMetadata[offset].getSqlType();
                PDataType pDataType = PDataType.fromTypeId(sqlType);
                Object upsertValue;
                if (pDataType.isArrayType()) {
                    String arrayJson = Arrays.toString(value.split(csvArrayDelimiter));
                    JSONArray jsonArray = new JSONArray(new JSONTokener(arrayJson));
                    Object[] vals = new Object[jsonArray.length()];
                    for (int x = 0; x < jsonArray.length(); x++) {
                        vals[x] = jsonArray.get(x);
                    }
                    String baseTypeSqlName = PDataType.arrayBaseType(pDataType).getSqlTypeName();
                    Array array = connection.createArrayOf(baseTypeSqlName, vals);
                    upsertValue = pDataType.toObject(array, pDataType);
                } else {
                    upsertValue = pDataType.toObject(value);
                }
                if (upsertValue != null) {
                    colUpsert.setObject(index++, upsertValue, sqlType);
                } else {
                    colUpsert.setNull(index++, sqlType);
                }
            }

            // add headers if necessary
            Map<String, String> headerValues = event.getHeaders();
            for (int i = 0; i < headers.size(); i++, offset++) {
                String headerName = headers.get(i);
                String headerValue = headerValues.get(headerName);
                sqlType = columnMetadata[offset].getSqlType();
                Object upsertValue = PDataType.fromTypeId(sqlType).toObject(headerValue);
                if (upsertValue != null) {
                    colUpsert.setObject(index++, upsertValue, sqlType);
                } else {
                    colUpsert.setNull(index++, sqlType);
                }
            }

            if (autoGenerateKey) {
                sqlType = columnMetadata[offset].getSqlType();
                String generatedRowValue = this.keyGenerator.generate();
                Object rowkeyValue = PDataType.fromTypeId(sqlType).toObject(generatedRowValue);
                colUpsert.setObject(index++, rowkeyValue, sqlType);
            }
            colUpsert.execute();
        }
        connection.commit();
    } catch (Exception ex) {
        logger.error("An error {} occurred during persisting the event ", ex.getMessage());
        throw new SQLException(ex.getMessage());
    } finally {
        if (wasAutoCommit) {
            connection.setAutoCommit(true);
        }
    }

}

From source file:org.apache.phoenix.util.csv.CsvUpsertExecutor.java
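Here csvRecord.size() is checked against the number of conversion functions, so a record with too few values fails fast with a descriptive message instead of a cryptic error downstream.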

/**
 * Upsert a single record.
 *
 * @param csvRecord CSV record containing the data to be upserted
 */
void execute(CSVRecord csvRecord) {
    try {
        if (csvRecord.size() < conversionFunctions.size()) {
            String message = String.format("CSV record does not have enough values (has %d, but needs %d)",
                    csvRecord.size(), conversionFunctions.size());
            throw new IllegalArgumentException(message);
        }
        for (int fieldIndex = 0; fieldIndex < conversionFunctions.size(); fieldIndex++) {
            Object sqlValue = conversionFunctions.get(fieldIndex).apply(csvRecord.get(fieldIndex));
            if (sqlValue != null) {
                preparedStatement.setObject(fieldIndex + 1, sqlValue);
            } else {
                preparedStatement.setNull(fieldIndex + 1, dataTypes.get(fieldIndex).getSqlType());
            }
        }
        preparedStatement.execute();
        upsertListener.upsertDone(++upsertCount);
    } catch (Exception e) {
        if (LOG.isDebugEnabled()) {
            // Even though this is an error we only log it with debug logging because we're notifying the
            // listener, and it can do its own logging if needed
            LOG.debug("Error on CSVRecord " + csvRecord, e);
        }
        upsertListener.errorOnRecord(csvRecord, e);
    }
}

From source file:org.apache.ranger.unixusersync.process.FileSourceUserGroupBuilder.java
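Ranger's file-based user sync reads the first value of each record as a user name and iterates from index 1 up to csvRecord.size() to collect that user's groups.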

public Map<String, List<String>> readTextFile(File textFile) throws Exception {

    Map<String, List<String>> ret = new HashMap<String, List<String>>();

    String delimiter = config.getUserSyncFileSourceDelimiter();

    CSVFormat csvFormat = CSVFormat.newFormat(delimiter.charAt(0));

    CSVParser csvParser = new CSVParser(new BufferedReader(new FileReader(textFile)), csvFormat);

    List<CSVRecord> csvRecordList = csvParser.getRecords();

    if (csvRecordList != null) {
        for (CSVRecord csvRecord : csvRecordList) {
            List<String> groups = new ArrayList<String>();
            String user = csvRecord.get(0);

            user = user.replaceAll("^\"|\"$", "");

            int i = csvRecord.size();

            for (int j = 1; j < i; j++) {
                String group = csvRecord.get(j);
                if (group != null && !group.isEmpty()) {
                    group = group.replaceAll("^\"|\"$", "");
                    groups.add(group);
                }
            }
            ret.put(user, groups);
        }
    }

    csvParser.close();

    return ret;
}

From source file:org.apache.storm.sql.runtime.serde.csv.CsvScheme.java
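Storm's CSV deserializer asserts that record.size() equals the number of declared field names before copying the values into a list.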

@Override
public List<Object> deserialize(ByteBuffer ser) {
    try {
        String data = new String(Utils.toByteArray(ser), StandardCharsets.UTF_8);
        CSVParser parser = CSVParser.parse(data, CSVFormat.RFC4180);
        CSVRecord record = parser.getRecords().get(0);
        Preconditions.checkArgument(record.size() == fieldNames.size(), "Invalid schema");

        ArrayList<Object> list = new ArrayList<>(fieldNames.size());
        for (int i = 0; i < record.size(); i++) {
            list.add(record.get(i));
        }
        return list;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java
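Tika's ISA-Tab parser renders a tab-separated study file as an XHTML table, using record.size() to emit one header or data cell per value.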

public static void parseStudy(InputStream stream, XHTMLContentHandler xhtml, Metadata metadata,
        ParseContext context) throws IOException, TikaException, SAXException {
    TikaInputStream tis = TikaInputStream.get(stream);
    // Automatically detect the character encoding
    TikaConfig tikaConfig = context.get(TikaConfig.class);
    if (tikaConfig == null) {
        tikaConfig = TikaConfig.getDefaultConfig();
    }
    try (AutoDetectReader reader = new AutoDetectReader(new CloseShieldInputStream(tis), metadata,
            tikaConfig.getEncodingDetector()); CSVParser csvParser = new CSVParser(reader, CSVFormat.TDF)) {
        Iterator<CSVRecord> iterator = csvParser.iterator();

        xhtml.startElement("table");

        xhtml.startElement("thead");
        if (iterator.hasNext()) {
            CSVRecord record = iterator.next();
            for (int i = 0; i < record.size(); i++) {
                xhtml.startElement("th");
                xhtml.characters(record.get(i));
                xhtml.endElement("th");
            }
        }
        xhtml.endElement("thead");

        xhtml.startElement("tbody");
        while (iterator.hasNext()) {
            CSVRecord record = iterator.next();
            xhtml.startElement("tr");
            for (int j = 0; j < record.size(); j++) {
                xhtml.startElement("td");
                xhtml.characters(record.get(j));
                xhtml.endElement("td");
            }
            xhtml.endElement("tr");
        }
        xhtml.endElement("tbody");

        xhtml.endElement("table");
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java
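parseAssay builds the same table structure for assay files: the first record becomes the header row, and record.size() bounds the cells written for each data row.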

public static void parseAssay(InputStream stream, XHTMLContentHandler xhtml, Metadata metadata,
        ParseContext context) throws IOException, TikaException, SAXException {
    TikaInputStream tis = TikaInputStream.get(stream);

    // Automatically detect the character encoding

    TikaConfig tikaConfig = context.get(TikaConfig.class);
    if (tikaConfig == null) {
        tikaConfig = TikaConfig.getDefaultConfig();
    }
    try (AutoDetectReader reader = new AutoDetectReader(new CloseShieldInputStream(tis), metadata,
            tikaConfig.getEncodingDetector()); CSVParser csvParser = new CSVParser(reader, CSVFormat.TDF)) {
        xhtml.startElement("table");

        Iterator<CSVRecord> iterator = csvParser.iterator();

        xhtml.startElement("thead");
        if (iterator.hasNext()) {
            CSVRecord record = iterator.next();
            for (int i = 0; i < record.size(); i++) {
                xhtml.startElement("th");
                xhtml.characters(record.get(i));
                xhtml.endElement("th");
            }
        }
        xhtml.endElement("thead");

        xhtml.startElement("tbody");
        while (iterator.hasNext()) {
            CSVRecord record = iterator.next();
            xhtml.startElement("tr");
            for (int j = 0; j < record.size(); j++) {
                xhtml.startElement("td");
                xhtml.characters(record.get(j));
                xhtml.endElement("td");
            }
            xhtml.endElement("tr");
        }
        xhtml.endElement("tbody");

        xhtml.endElement("table");
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java
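While extracting ISA-Tab metadata, a record whose size() is 1 and whose single value is all upper case marks the start of a new section.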

private static void extractMetadata(Reader reader, Metadata metadata, String studyFileName) throws IOException {
    boolean investigationSection = false;
    boolean studySection = false;
    boolean studyTarget = false;

    Map<String, String> map = new HashMap<String, String>();

    try (CSVParser csvParser = new CSVParser(reader, CSVFormat.TDF)) {
        Iterator<CSVRecord> iterator = csvParser.iterator();

        while (iterator.hasNext()) {
            CSVRecord record = iterator.next();
            String field = record.get(0);
            if ((field.toUpperCase(Locale.ENGLISH).equals(field)) && (record.size() == 1)) {
                investigationSection = Arrays.asList(sections).contains(field);
                studySection = (studyFileName != null) && (field.equals(studySectionField));
            } else {
                if (investigationSection) {
                    addMetadata(field, record, metadata);
                } else if (studySection) {
                    if (studyTarget) {
                        break;
                    }
                    String value = record.get(1);
                    map.put(field, value);
                    studyTarget = (field.equals(studyFileNameField)) && (value.equals(studyFileName));
                    if (studyTarget) {
                        mapStudyToMetadata(map, metadata);
                        studySection = false;
                    }
                } else if (studyTarget) {
                    addMetadata(field, record, metadata);
                }
            }
        }
    }
}

From source file:org.apache.tika.parser.isatab.ISATabUtils.java
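Finally, addMetadata ignores records with at most one value and adds every value from index 1 up to record.size() to the metadata under the given field name.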

private static void addMetadata(String field, CSVRecord record, Metadata metadata) {
    if ((record == null) || (record.size() <= 1)) {
        return;
    }

    for (int i = 1; i < record.size(); i++) {
        metadata.add(field, record.get(i));
    }
}