Usage examples for the org.apache.commons.csv CSVFormat.DEFAULT constant, collected from open-source projects. To view the full source code of an example, follow its Source Link.
From source file:org.easybatch.extensions.apache.common.csv.ApacheCommonCsvRecordMapperTest.java
/**
 * Mapping a record whose fields are wrapped in a custom quote character (')
 * should strip the quotes and keep embedded delimiters as field data.
 */
@Test
public void testApacheCommonCsvQualifier() throws Exception {
    StringReader reader = new StringReader("'foo,s','bar,n'");
    CSVFormat format = CSVFormat.DEFAULT.withQuote('\'').withHeader("firstName", "lastName", "age",
            "married");

    ApacheCommonCsvRecord record = getApacheCommonCsvRecord(reader, format);
    GenericRecord<Foo> mapped = mapper.processRecord(record);
    Foo payload = mapped.getPayload();

    // Quoted commas are data, not separators.
    assertThat(payload).isNotNull();
    assertThat(payload.getFirstName()).isEqualTo("foo,s");
    assertThat(payload.getLastName()).isEqualTo("bar,n");
    // Columns absent from the input map to default values.
    assertThat(payload.getAge()).isEqualTo(0);
    assertThat(payload.isMarried()).isFalse();
}
From source file:org.easybatch.extensions.apache.common.csv.ApacheCommonCsvRecordMapperTest.java
/**
 * Mapping a record whose quoted fields contain line separators should keep
 * the embedded line breaks as part of the field values.
 */
@Test
public void testApacheCommonCsvLineFeed() throws Exception {
    StringReader reader = new StringReader("'foo" + LINE_SEPARATOR + "','bar" + LINE_SEPARATOR + "'");
    CSVFormat format = CSVFormat.DEFAULT.withQuote('\'').withHeader("firstName", "lastName", "age",
            "married");

    ApacheCommonCsvRecord record = getApacheCommonCsvRecord(reader, format);
    GenericRecord<Foo> mapped = mapper.processRecord(record);
    Foo payload = mapped.getPayload();

    assertThat(payload).isNotNull();
    // Line breaks inside quoted fields survive the mapping.
    assertThat(payload.getFirstName()).isEqualTo("foo" + LINE_SEPARATOR);
    assertThat(payload.getLastName()).isEqualTo("bar" + LINE_SEPARATOR);
    assertThat(payload.getAge()).isEqualTo(0);
    assertThat(payload.isMarried()).isFalse();
}
From source file:org.easybatch.extensions.apache.common.csv.ApacheCommonCsvRecordReaderTest.java
/**
 * Prepares and opens a record reader over a single in-memory CSV record
 * ("foo,bar,15,true") with named columns, before each test.
 */
@Before
public void setUp() throws Exception {
    StringReader input = new StringReader("foo,bar,15,true");
    CSVFormat format = CSVFormat.DEFAULT.withHeader("firstName", "lastName", "age", "married");
    recordReader = new ApacheCommonCsvRecordReader(new CSVParser(input, format));
    recordReader.open();
}
From source file:org.easybatch.extensions.apache.common.csv.ApacheCommonCsvSupportIntegrationTest.java
/**
 * End-to-end check: read tweets.csv with commons-csv, map records to Tweet
 * beans, marshal them back with ';' as delimiter and '\'' as qualifier, and
 * verify the output written to standard out.
 */
@Test
public void testAllComponentsTogether() throws Exception {
    CSVFormat format = CSVFormat.DEFAULT.withHeader("id", "user", "message");
    CSVParser parser = new CSVParser(new FileReader(this.getClass().getResource("/tweets.csv").getFile()),
            format);

    aNewJob().reader(new ApacheCommonCsvRecordReader(parser))
            .mapper(new ApacheCommonCsvRecordMapper(Tweet.class))
            .marshaller(new ApacheCommonCsvRecordMarshaller(Tweet.class,
                    new String[] { "id", "user", "message" }, ';', '\''))
            .writer(new StandardOutputRecordWriter()).call();

    assertThat(systemOut.getLog()).isEqualTo("'1';'foo';'hello'" + LINE_SEPARATOR + "'2';'bar';'hey'"
            + LINE_SEPARATOR + "'3';'baz';'hi'" + LINE_SEPARATOR);
}
From source file:org.ecloudmanager.monitoring.HaproxyStatsCollector.java
/**
 * Asynchronously fetches the haproxy statistics CSV from the given address
 * (port 22002, "/;csv" endpoint) and forwards the parsed records to
 * {@code collectRecords}. Failures are logged, never thrown.
 *
 * @param haproxyStatsAddr host name or IP of the haproxy stats endpoint
 */
private void collectStats(String haproxyStatsAddr) {
    client.target("http://" + haproxyStatsAddr + ":22002" + "/;csv").request().async()
            .get(new InvocationCallback<Response>() {
                @Override
                public void completed(Response response) {
                    if (response.getStatus() != 200) {
                        log.error("Cannot connect to haproxy stats endpoint " + haproxyStatsAddr
                                + " response code " + response.getStatus());
                        return;
                    }
                    String csv = response.readEntity(String.class);
                    // haproxy prefixes the header row with "# "; strip it so the
                    // first row parses as column names.
                    csv = csv.replaceFirst("# ", "");
                    try {
                        List<CSVRecord> records = CSVParser.parse(csv, CSVFormat.DEFAULT.withHeader())
                                .getRecords();
                        collectRecords(haproxyStatsAddr, records);
                    } catch (IOException e) {
                        log.error(e);
                    }
                }

                @Override
                public void failed(Throwable throwable) {
                    log.trace("Can't get haproxy stats from " + haproxyStatsAddr, throwable);
                }
            });
}
From source file:org.ecloudmanager.service.deployment.geolite.GeolocationService.java
@PostConstruct private void init() { try {//from w w w .j a va 2s.c o m URL cityUrl = getClass().getClassLoader().getResource("/GeoLite2-City-Locations-en.csv"); List<CSVRecord> cityRecords = CSVParser .parse(cityUrl, Charset.defaultCharset(), CSVFormat.DEFAULT.withHeader()).getRecords(); URL countryUrl = getClass().getClassLoader().getResource("/GeoLite2-Country-Locations-en.csv"); List<CSVRecord> countryRecords = CSVParser .parse(countryUrl, Charset.defaultCharset(), CSVFormat.DEFAULT.withHeader()).getRecords(); cities = cityRecords.stream().map(record -> { StringBuilder labelBuilder = new StringBuilder(); List<String> items = new ArrayList<>(); String country = (record.get("country_iso_code") + " " + record.get("country_name")).trim(); items.add(country); String subdivision = (record.get("subdivision_1_name") + " " + record.get("subdivision_2_name")) .trim(); items.add(subdivision); String city = record.get("city_name").trim(); items.add(city); items.forEach(item -> { if (!StringUtils.isEmpty(item.trim())) { if (labelBuilder.length() > 0) { labelBuilder.append(", "); } labelBuilder.append(item); } }); return new GeolocationRecord(record.get("geoname_id"), country, subdivision, city, labelBuilder.toString()); }).collect(Collectors.toMap(GeolocationRecord::getGeoid, record -> record)); countries = countryRecords.stream().map(record -> { String label = record.get("country_iso_code") + " " + record.get("country_name"); return new GeolocationRecord(record.get("geoname_id"), label, "", "", label); }).collect(Collectors.toMap(GeolocationRecord::getGeoid, record -> record)); } catch (IOException e) { log.error("Cannot initialize geolocation service", e); } }
From source file:org.gephi.io.exporter.plugin.ExporterSpreadsheet.java
private void exportData(Graph graph) throws Exception { final CSVFormat format = CSVFormat.DEFAULT.withDelimiter(fieldDelimiter); try (CSVPrinter csvWriter = new CSVPrinter(writer, format)) { boolean isEdgeTable = tableToExport != ExportTable.NODES; Table table = isEdgeTable ? graph.getModel().getEdgeTable() : graph.getModel().getNodeTable(); ElementIterable<? extends Element> rows; Object[] edgeLabels = graph.getModel().getEdgeTypeLabels(); boolean includeEdgeKindColumn = false; for (Object edgeLabel : edgeLabels) { if (edgeLabel != null && !edgeLabel.toString().isEmpty()) { includeEdgeKindColumn = true; }/* w w w .ja v a2 s .co m*/ } TimeFormat timeFormat = graph.getModel().getTimeFormat(); DateTimeZone timeZone = graph.getModel().getTimeZone(); List<Column> columns = new ArrayList<>(); if (columnIdsToExport != null) { for (String columnId : columnIdsToExport) { Column column = table.getColumn(columnId); if (column != null) { columns.add(column); } } } else { for (Column column : table) { columns.add(column); } } //Write column headers: if (isEdgeTable) { csvWriter.print("Source"); csvWriter.print("Target"); csvWriter.print("Type"); if (includeEdgeKindColumn) { csvWriter.print("Kind"); } } for (Column column : columns) { //Use the title only if it's the same as the id (case insensitive): String columnId = column.getId(); String columnTitle = column.getTitle(); String columnHeader = columnId.equalsIgnoreCase(columnTitle) ? columnTitle : columnId; csvWriter.print(columnHeader); } csvWriter.println(); //Write rows: if (isEdgeTable) { rows = graph.getEdges(); } else { rows = graph.getNodes(); } for (Element row : rows) { if (isEdgeTable) { Edge edge = (Edge) row; csvWriter.print(edge.getSource().getId()); csvWriter.print(edge.getTarget().getId()); csvWriter.print(edge.isDirected() ? 
"Directed" : "Undirected"); if (includeEdgeKindColumn) { csvWriter.print(edge.getTypeLabel().toString()); } } for (Column column : columns) { Object value = row.getAttribute(column); String text; if (value != null) { if (value instanceof Number) { text = NUMBER_FORMAT.format(value); } else { text = AttributeUtils.print(value, timeFormat, timeZone); } } else { text = ""; } csvWriter.print(text); } csvWriter.println(); } } }
From source file:org.gephi.io.importer.plugin.file.spreadsheet.SpreadsheetUtils.java
public static CSVParser configureCSVParser(File file, Character fieldSeparator, Charset charset, boolean withFirstRecordAsHeader) throws IOException { if (fieldSeparator == null) { fieldSeparator = ','; }//w w w . j av a 2 s . c om CSVFormat csvFormat = CSVFormat.DEFAULT.withDelimiter(fieldSeparator).withEscape('\\') .withIgnoreEmptyLines(true).withNullString("").withIgnoreSurroundingSpaces(true).withTrim(true); if (withFirstRecordAsHeader) { csvFormat = csvFormat.withFirstRecordAsHeader().withAllowMissingColumnNames(false) .withIgnoreHeaderCase(false); } else { csvFormat = csvFormat.withHeader((String[]) null).withSkipHeaderRecord(false); } boolean hasBOM = false; try (FileInputStream is = new FileInputStream(file)) { CharsetToolkit charsetToolkit = new CharsetToolkit(is); hasBOM = charsetToolkit.hasUTF8Bom() || charsetToolkit.hasUTF16BEBom() || charsetToolkit.hasUTF16LEBom(); } catch (IOException e) { //NOOP } FileInputStream fileInputStream = new FileInputStream(file); InputStreamReader is = new InputStreamReader(fileInputStream, charset); if (hasBOM) { try { is.read(); } catch (IOException e) { // should never happen, as a file with no content // but with a BOM has at least one char } } return new CSVParser(is, csvFormat); }
From source file:org.gitia.jdataanalysis.JDataAnalysis.java
public void save(String[][] data, String[] headers, String folder, String fileName) { String NEW_LINE_SEPARATOR = "\n"; FileWriter fileWriter = null; CSVPrinter csvFilePrinter = null;/*from ww w. j a v a 2s. c o m*/ //Create the CSVFormat object with "\n" as a record delimiter CSVFormat csvFileFormat = CSVFormat.DEFAULT.withRecordSeparator(NEW_LINE_SEPARATOR); try { //initialize FileWriter object File file = new File(folder + "/" + fileName); fileWriter = new FileWriter(file); //initialize CSVPrinter object csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat); //Create CSV file header csvFilePrinter.printRecord(headers); //Write a new student object list to the CSV file for (int i = 0; i < data.length; i++) { //List studentDataRecord = new ArrayList(); csvFilePrinter.printRecord(data[i]); } System.out.println("CSV file was created successfully !!!"); System.out.println(folder + "/" + fileName); } catch (Exception e) { System.out.println("Error in CsvFileWriter !!!"); e.printStackTrace(); } finally { try { fileWriter.flush(); fileWriter.close(); csvFilePrinter.close(); } catch (IOException e) { System.out.println("Error while flushing/closing fileWriter/csvPrinter !!!"); e.printStackTrace(); } } }
From source file:org.gitia.jdataanalysis.JDataAnalysis.java
/** * * @param list// w w w.j a v a2s . c o m * @param folder * @param fileName */ public void save(List<String> list, String folder, String fileName) { String NEW_LINE_SEPARATOR = "\n"; FileWriter fileWriter = null; CSVPrinter csvFilePrinter = null; //Create the CSVFormat object with "\n" as a record delimiter CSVFormat csvFileFormat = CSVFormat.DEFAULT.withRecordSeparator(NEW_LINE_SEPARATOR); try { //initialize FileWriter object File file = new File(folder + "/" + fileName); fileWriter = new FileWriter(file); //initialize CSVPrinter object csvFilePrinter = new CSVPrinter(fileWriter, csvFileFormat); //Create CSV file header //csvFilePrinter.printRecord(headers); //Write a new student object list to the CSV file for (int i = 0; i < list.size(); i++) { //List studentDataRecord = new ArrayList(); csvFilePrinter.printRecord(list.get(i)); } System.out.println("CSV file was created successfully !!!"); System.out.println(folder + "/" + fileName); } catch (Exception e) { System.out.println("Error in CsvFileWriter !!!"); } finally { try { fileWriter.flush(); fileWriter.close(); csvFilePrinter.close(); } catch (IOException e) { System.out.println("Error while flushing/closing fileWriter/csvPrinter !!!"); e.printStackTrace(); } } }