List of usage examples for org.apache.commons.csv CSVFormat DEFAULT
CSVFormat DEFAULT
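CSVFormat.DEFAULT is the library's standard comma-separated value format: comma delimiter, double-quote quoting, CRLF record separator, as in RFC 4180 but with empty lines between records ignored. Before the collected examples, here is a minimal self-contained sketch of parsing and printing with it (the class name and sample data are illustrative only, not taken from the examples below):

import java.io.StringReader;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;

public class CsvFormatDefaultDemo {
    public static void main(String[] args) throws Exception {
        // Parse two records with the default format.
        try (CSVParser parser = CSVFormat.DEFAULT.parse(new StringReader("a,b\nc,d"))) {
            for (CSVRecord record : parser) {
                System.out.println(record.get(0) + " | " + record.get(1));
            }
        }

        // Print a record; DEFAULT quotes any value containing a delimiter or quote.
        StringWriter out = new StringWriter();
        try (CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT)) {
            printer.printRecord("x", "y,z");
        }
        System.out.print(out); // prints: x,"y,z" followed by CRLF
    }
}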
From source file:com.streamsets.pipeline.lib.parser.delimited.TestDelimitedCharDataParser.java
@Test
public void testParseNoHeaderWithOffset() throws Exception {
    OverrunReader reader = new OverrunReader(new StringReader("A,B\na,b"), 1000, true, false);
    DataParser parser = new DelimitedCharDataParser(getContext(), "id", reader, 4, 0, CSVFormat.DEFAULT,
            CsvHeader.NO_HEADER, -1, CsvRecordType.LIST);
    Assert.assertEquals("4", parser.getOffset());
    Record record = parser.parse();
    Assert.assertNotNull(record);
    Assert.assertEquals("id::4", record.getHeader().getSourceId());
    Assert.assertEquals("a", record.get().getValueAsList().get(0).getValueAsMap().get("value").getValueAsString());
    Assert.assertFalse(record.has("[0]/header"));
    Assert.assertEquals("b", record.get().getValueAsList().get(1).getValueAsMap().get("value").getValueAsString());
    Assert.assertFalse(record.has("[1]/header"));
    Assert.assertEquals("7", parser.getOffset());
    record = parser.parse();
    Assert.assertNull(record);
    Assert.assertEquals("-1", parser.getOffset());
    parser.close();
}
From source file:com.datascience.cascading.scheme.CsvScheme.java
/**
 * Creates a new CSV scheme with {@link org.apache.commons.csv.CSVFormat#DEFAULT}.
 * <p>
 * Strict mode is enabled when using this constructor.
 *
 * @param charset The character set with which to read and write CSV files.
 * @see com.datascience.cascading.scheme.CsvScheme
 */
public CsvScheme(Charset charset) {
    this(Fields.ALL, Fields.ALL, CSVFormat.DEFAULT, charset, true);
}
From source file:com.mycompany.twitterapp.TwitterApp.java
public void saveToFile(long id, String tweet, String user, Orientation orientation) {
    CSVFormat csvFileFormat = CSVFormat.DEFAULT;
    // Tweets and users are appended to separate files; a null tweet means a user record.
    String fileName = (tweet != null) ? tweetFileName : userFileName;
    OutputStreamWriter fileWriter = null;
    CSVPrinter csvFilePrinter = null;
    try {
        fileWriter = new OutputStreamWriter(new FileOutputStream(fileName, true), "UTF-8");
        csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat);
        if (tweet != null) {
            csvFilePrinter.printRecord(id, orientation.name().toLowerCase(), tweet);
        } else {
            csvFilePrinter.printRecord(id, user, orientation.name().toLowerCase());
        }
    } catch (IOException ex) {
        Logger.getLogger(TwitterApp.class.getName()).log(Level.SEVERE, null, ex);
    } finally {
        try {
            // Closing the printer flushes it and closes the underlying writer;
            // the null checks avoid an NPE when the writer failed to open.
            if (csvFilePrinter != null) {
                csvFilePrinter.close();
            } else if (fileWriter != null) {
                fileWriter.close();
            }
        } catch (IOException e) {
            System.out.println("Error while flushing/closing fileWriter/csvPrinter !!!");
            e.printStackTrace();
        }
    }
}
From source file:com.datascience.cascading.scheme.CsvScheme.java
/**
 * Creates a new CSV scheme with the {@link org.apache.commons.csv.CSVFormat#DEFAULT} format.
 * <p>
 * The CSV input/output encoding defaults to {@code UTF-8}.
 *
 * @param strict Indicates whether to parse records in strict parsing mode. When strict mode is disabled,
 *               single record parse errors will be caught and logged.
 * @see com.datascience.cascading.scheme.CsvScheme
 */
public CsvScheme(boolean strict) {
    this(Fields.ALL, Fields.ALL, CSVFormat.DEFAULT, StandardCharsets.UTF_8, strict);
}
From source file:com.team3637.service.MatchServiceMySQLImpl.java
@Override
public void importCSV(String inputFile) {
    try {
        String csvData = new String(Files.readAllBytes(FileSystems.getDefault().getPath(inputFile)));
        csvData = csvData.replaceAll("\\r", "");
        CSVParser parser = CSVParser.parse(csvData, CSVFormat.DEFAULT.withRecordSeparator("\n"));
        for (CSVRecord record : parser) {
            Match match = new Match();
            match.setId(Integer.parseInt(record.get(0)));
            match.setMatchNum(Integer.parseInt(record.get(1)));
            match.setTeam(Integer.parseInt(record.get(2)));
            match.setScore(Integer.parseInt(record.get(3)));
            String[] tags = record.get(4).substring(1, record.get(4).length() - 1).split(",");
            for (int i = 0; i < tags.length; i++)
                tags[i] = tags[i].trim();
            if (tags.length > 0 && !tags[0].equals(""))
                match.setTags(Arrays.asList(tags));
            else
                match.setTags(new ArrayList<String>());
            if (checkForMatch(match.getMatchNum(), match.getTeam()))
                update(match);
            else
                create(match);
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
From source file:apiconnector.TestDataFunctionality.java
@Ignore
@Test
public void testGetDataAsCsv() throws Exception {
    //client_read.setVerboseLevel(1);
    Random random = new Random();

    Map<String, String> filters = new TreeMap<String, String>();
    filters.put("tag", "study_14");
    DataSet[] all = client_read.dataList(filters).getData();

    for (int i = 0; i < 5;) {
        DataSet current = all[random.nextInt(all.length)];
        String numInst = current.getQualityMap().get("NumberOfInstances");

        if (current.getFileId() == null || !current.getFormat().toLowerCase().equals("arff")) {
            continue;
        }

        String fullUrl = url + "data/get_csv/" + current.getFileId() + "/" + current.getName() + ".csv";
        System.out.println(fullUrl);
        final URL url = new URL(fullUrl);
        final Reader reader = new InputStreamReader(new BOMInputStream(url.openStream()), "UTF-8");
        final CSVParser parser = new CSVParser(reader, CSVFormat.DEFAULT);
        try {
            if (numInst != null) {
                int numberOfInstances = (int) Double.parseDouble(numInst);
                assertEquals(parser.getRecords().size(), numberOfInstances);
            }
        } finally {
            parser.close();
            reader.close();
        }
        // important: only count iterations that were not skipped
        i += 1;
    }
}
From source file:canreg.client.dataentry.Convert.java
public static boolean convertData(canreg.client.gui.management.CanReg4MigrationInternalFrame.MigrationTask task,
        String filepath, String datafile, String regcode) {
    Connection conn;
    Statement stmt;
    ResultSet rs_hdr;
    ResultSet rs_data;
    boolean success = false;
    int totalrowcount = 0;
    int rowsImported = 0;
    String csv = filepath + Globals.FILE_SEPARATOR + regcode + ".csv";
    CSVPrinter printer;
    try {
        debugOut("Migrating data " + datafile);
        pconn = (ParadoxConnection) DriverManager
                .getConnection("jdbc:paradox:///" + filepath.replaceAll("\\\\", "/"));
        final ParadoxTable table = TableData.listTables(pconn, datafile).get(0);
        totalrowcount = table.getRowCount();

        SystemDescription sd = new SystemDescription(
                Globals.CANREG_SERVER_SYSTEM_CONFIG_FOLDER + Globals.FILE_SEPARATOR + regcode + ".xml");
        DatabaseVariablesListElement[] variableListElements;
        variableListElements = sd.getDatabaseVariableListElements();
        ArrayList<String> dbvle = new ArrayList<>();
        ArrayList<String> cols = new ArrayList<>();

        // Handle variable names that collide with reserved words by dropping the trailing underscore.
        for (DatabaseVariablesListElement variable : variableListElements) {
            if (variable.getShortName().endsWith("_")) {
                dbvle.add(variable.getShortName().replace("_", ""));
            } else {
                dbvle.add(variable.getShortName());
            }
        }

        conn = DriverManager.getConnection("jdbc:paradox:///" + filepath.replaceAll("\\\\", "/"));
        final DatabaseMetaData meta = conn.getMetaData();
        rs_hdr = meta.getColumns("", "", datafile, "%");

        // Compare the variables in the file against those in the database.
        while (rs_hdr.next()) {
            for (String dbvar : dbvle) {
                if (rs_hdr.getString("COLUMN_NAME").equals(dbvar)
                        || rs_hdr.getString("COLUMN_NAME").replaceAll(" ", "_").equals(dbvar)) {
                    cols.add(rs_hdr.getString("COLUMN_NAME"));
                }
            }
        }

        String[] strheader = new String[cols.size()];
        String query = "SELECT ";
        for (int i = 0; i < cols.size(); i++) {
            strheader[i] = cols.get(i);
            if (i == cols.size() - 1) {
                query += "\"" + strheader[i] + "\"";
            } else {
                query += "\"" + strheader[i] + "\",";
            }
        }
        query += " FROM \"" + datafile + "\"";

        CSVFormat format = CSVFormat.DEFAULT.withFirstRecordAsHeader().withHeader(strheader).withDelimiter(',');
        debugOut(query);
        printer = new CSVPrinter(new FileWriter(csv), format);

        int hdrsize = strheader.length;
        Object[] strdata = new String[hdrsize];
        stmt = conn.createStatement();
        rs_data = stmt.executeQuery(query);

        if (Globals.DEBUG) {
            Statement stmt2 = conn.createStatement();
            String q = "SELECT RecNum FROM \"" + datafile + "\"";
            ResultSet rs_all_data = stmt2.executeQuery(q);
            debugOut(rs_all_data.toString());
        }

        while (rs_data.next()) {
            for (int i = 1; i < rs_data.getMetaData().getColumnCount() + 1; i++) {
                switch (rs_data.getMetaData().getColumnType(i)) {
                case 4: // java.sql.Types.INTEGER
                    strdata[i - 1] = Integer.toString(rs_data.getShort(i));
                    break;
                case 12: // java.sql.Types.VARCHAR
                    strdata[i - 1] = StringEscapeUtils.escapeCsv(rs_data.getString(i));
                    break;
                }
            }
            printer.printRecord(strdata);
            rowsImported++;
        }
        printer.flush();
        printer.close();
        success = true;
    } catch (SQLException ex) {
        Logger.getLogger(Convert.class.getName()).log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        Logger.getLogger(Convert.class.getName()).log(Level.SEVERE, null, ex);
    }
    success = success && (rowsImported == totalrowcount);
    return success;
}
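A side note on the format chain in the example above: withFirstRecordAsHeader() also sets the skip-header-record flag, and the subsequent withHeader(...) and withDelimiter(...) calls carry that flag along, so a CSVPrinter built from the chained format does not emit a header row. A small sketch contrasting the two (behavior as I understand Commons CSV 1.x; worth verifying against the version in use):

import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class HeaderRowDemo {
    public static void main(String[] args) throws Exception {
        String[] header = {"A", "B"};

        // withHeader alone: the header is printed as the first record.
        StringWriter withHeader = new StringWriter();
        try (CSVPrinter p = new CSVPrinter(withHeader, CSVFormat.DEFAULT.withHeader(header))) {
            p.printRecord("1", "2");
        }
        System.out.print(withHeader); // A,B then 1,2

        // withFirstRecordAsHeader first: skipHeaderRecord carries over, so no header row is written.
        StringWriter skipped = new StringWriter();
        try (CSVPrinter p = new CSVPrinter(skipped,
                CSVFormat.DEFAULT.withFirstRecordAsHeader().withHeader(header))) {
            p.printRecord("1", "2");
        }
        System.out.print(skipped); // only 1,2
    }
}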
From source file:com.denkbares.semanticcore.utils.ResultTableModel.java
public static ResultTableModel fromCSV(String csv) throws IOException {
    try (CSVParser parser = CSVFormat.DEFAULT.withHeader().parse(new StringReader(csv))) {

        // read the header
        Map<String, Integer> headerMap = parser.getHeaderMap();
        List<String> variables = headerMap.entrySet().stream()
                .sorted(Comparator.comparing(Map.Entry::getValue))
                .map(Map.Entry::getKey)
                .collect(Collectors.toList());

        // read the rows
        List<TableRow> rows = new LinkedList<>();
        for (final CSVRecord record : parser) {
            SimpleTableRow row = new SimpleTableRow();
            for (String variable : variables) {
                String value = record.get(variable);
                if (value != null) {
                    row.addValue(variable, new LiteralImpl(value));
                }
            }
            rows.add(row);
        }

        // and return the parsed table
        return new ResultTableModel(rows, variables);
    }
}
From source file:com.streamsets.pipeline.lib.parser.delimited.TestDelimitedCharDataParser.java
@Test
public void testParseIgnoreHeaderWithOffset() throws Exception {
    OverrunReader reader = new OverrunReader(new StringReader("A,B\na,b"), 1000, true, false);
    DataParser parser = new DelimitedCharDataParser(getContext(), "id", reader, 4, 0, CSVFormat.DEFAULT,
            CsvHeader.IGNORE_HEADER, -1, CsvRecordType.LIST);
    Assert.assertEquals("4", parser.getOffset());
    Record record = parser.parse();
    Assert.assertNotNull(record);
    Assert.assertEquals("id::4", record.getHeader().getSourceId());
    Assert.assertEquals("a", record.get().getValueAsList().get(0).getValueAsMap().get("value").getValueAsString());
    Assert.assertFalse(record.has("[0]/header"));
    Assert.assertEquals("b", record.get().getValueAsList().get(1).getValueAsMap().get("value").getValueAsString());
    Assert.assertFalse(record.has("[1]/header"));
    Assert.assertEquals("7", parser.getOffset());
    record = parser.parse();
    Assert.assertNull(record);
    Assert.assertEquals("-1", parser.getOffset());
    parser.close();
}
From source file:com.datascience.cascading.scheme.CsvScheme.java
/**
 * Creates a new CSV scheme with the {@link org.apache.commons.csv.CSVFormat#DEFAULT} format.
 *
 * @param charset The character set with which to read and write CSV files.
 * @param strict  Indicates whether to parse records in strict parsing mode. When strict mode is disabled,
 *                single record parse errors will be caught and logged.
 * @see com.datascience.cascading.scheme.CsvScheme
 */
public CsvScheme(Charset charset, boolean strict) {
    this(Fields.ALL, Fields.ALL, CSVFormat.DEFAULT, charset, strict);
}