List of usage examples for org.apache.commons.csv CSVFormat DEFAULT
CSVFormat DEFAULT
To view the source code for org.apache.commons.csv CSVFormat DEFAULT, click the Source Link below.
From source file:geovista.readers.csv.GeogCSVReader.java
public Object[] readFileNew(InputStream is) { // get first line BufferedReader in = new BufferedReader(new InputStreamReader(is)); Iterable<CSVRecord> parser = null; try {// w w w . j av a 2 s . c o m parser = CSVFormat.DEFAULT.withDelimiter(this.currDelimiter).parse(in); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); } return null; }
From source file:com.streamsets.pipeline.lib.parser.delimited.TestDelimitedCharDataParser.java
@Test public void testParseWithHeaderWithListMap() throws Exception { OverrunReader reader = new OverrunReader(new StringReader("A,B\na,b"), 1000, true, false); DataParser parser = new DelimitedCharDataParser(getContext(), "id", reader, 0, 0, CSVFormat.DEFAULT, CsvHeader.WITH_HEADER, -1, CsvRecordType.LIST_MAP); Assert.assertEquals("4", parser.getOffset()); Record record = parser.parse();//w w w .j a v a2 s . c o m Assert.assertNotNull(record); Assert.assertEquals("id::4", record.getHeader().getSourceId()); Assert.assertEquals("a", record.get().getValueAsListMap().get("A").getValueAsString()); Assert.assertEquals("b", record.get().getValueAsListMap().get("B").getValueAsString()); Assert.assertEquals("7", parser.getOffset()); record = parser.parse(); Assert.assertNull(record); Assert.assertEquals("-1", parser.getOffset()); parser.close(); }
From source file:biz.ganttproject.impex.csv.GanttCSVOpen.java
/**
 * Builds the CSV format used for import: empty lines are kept, surrounding
 * spaces are trimmed, and delimiter/quote characters come from the user's
 * CSV options when those are available.
 *
 * @param headers column names to declare on the format, or {@code null} for none
 * @return the configured {@link CSVFormat}
 */
private CSVFormat createCSVFormat(List<String> headers) {
    CSVFormat csvFormat = CSVFormat.DEFAULT
            .withIgnoreEmptyLines(false)
            .withIgnoreSurroundingSpaces(true);
    // Apply user-chosen separator and text-quoting characters when options exist.
    if (myCsvOptions != null) {
        char delimiter = myCsvOptions.sSeparatedChar.charAt(0);
        char quote = myCsvOptions.sSeparatedTextChar.charAt(0);
        csvFormat = csvFormat.withDelimiter(delimiter).withQuote(quote);
    }
    // Declare explicit column headers when the caller supplied them.
    if (headers != null) {
        csvFormat = csvFormat.withHeader(headers.toArray(new String[0]));
    }
    return csvFormat;
}
From source file:br.ufg.calendario.components.EventoBean.java
public void uploadEvento(FileUploadEvent event) { Map<String, Object> requestMap = FacesContext.getCurrentInstance().getExternalContext().getRequestMap(); FacesMessage msg;/*from ww w.jav a 2 s.c om*/ boolean saveStatus = false; UploadedFile arquivo = event.getFile(); try { InputStream arquivoReader = arquivo.getInputstream(); Charset charset = Charset.forName("UTF-8"); CharsetDecoder decoder = charset.newDecoder(); Reader reader = new InputStreamReader(arquivoReader, decoder); CSVParser parser = new CSVParser(reader, CSVFormat.DEFAULT.withHeader().withDelimiter(configBean.getDelimiter())); SimpleDateFormat dateFormatter = new SimpleDateFormat(configBean.getDateFormat()); for (Entry<String, Integer> entry : parser.getHeaderMap().entrySet()) { System.out.format("header: %s - %d\n", entry.getKey(), entry.getValue()); } Integer ano; Calendario cal = null; List<Regional> regionais = regionalDao.listar(); List<Interessado> interessados = interessadoDao.listar(); for (CSVRecord record : parser) { //adicionar entidade calendario (select box) na tela importar eventos. 
ano = Integer.parseInt(record.get(0)); Date dataInicio = dateFormatter.parse(record.get(1)); Date dataTermino = dateFormatter.parse(record.get(2)); String assunto = record.get(3); String descricao = record.get(4); String[] interessadoArray = record.get(5).split(configBean.getRegexSplitter()); String[] regionalArray = record.get(6).split(configBean.getRegexSplitter()); boolean aprovado = record.get(7) != null && record.get(7).trim().toUpperCase().equals("S"); if (cal == null) { //buscar apenas uma vez cal = calendarioDao.buscar(ano); } Set<Interessado> interessadoSet = new HashSet(); for (String interessado : interessadoArray) { if (!interessado.isEmpty()) { for (Interessado i : interessados) { if (i.getNome().equals(interessado.trim())) { interessadoSet.add(i); } } } } Set<Regional> regionalSet = new HashSet(); for (String regional : regionalArray) { if (!regional.isEmpty()) { for (Regional r : regionais) { if (r.getNome().equals(regional.trim())) { regionalSet.add(r); } } } } Evento evt = new Evento(assunto, dataInicio, dataTermino, descricao, cal, regionalSet, interessadoSet, aprovado); eventosImportados.add(evt); } } catch (IOException | ParseException | ArrayIndexOutOfBoundsException | NullPointerException e) { System.out.println("erro: " + e.getMessage()); } System.out.println("arquivo enviado: " + arquivo.getFileName()); msg = new FacesMessage(FacesMessage.SEVERITY_INFO, "info", LocaleBean.getMessage("arquivoEnviado")); FacesContext.getCurrentInstance().addMessage(null, msg); RequestContext.getCurrentInstance().addCallbackParam("resultado", saveStatus); }
From source file:com.datascience.cascading.scheme.CsvScheme.java
/**
 * Creates a new CSV scheme with {@link org.apache.commons.csv.CSVFormat#DEFAULT}.
 * <p>
 * Strict mode is enabled when using this constructor.
 * <p>
 * The CSV input/output encoding defaults to {@code UTF-8}.
 *
 * @see com.datascience.cascading.scheme.CsvScheme
 */
public CsvScheme() {
    // Delegates to the full constructor: all fields in/out, default format,
    // UTF-8 charset, strict mode enabled (last argument).
    this(Fields.ALL, Fields.ALL, CSVFormat.DEFAULT, StandardCharsets.UTF_8, true);
}
From source file:com.team3637.service.MatchServiceMySQLImpl.java
@Override public void exportCSV(String outputFile) { List<Match> data = getMatches(); FileWriter fileWriter = null; CSVPrinter csvFilePrinter = null;//from w ww.j a va2 s . c o m try { fileWriter = new FileWriter(outputFile); csvFilePrinter = new CSVPrinter(fileWriter, CSVFormat.DEFAULT.withRecordSeparator("\n")); for (Match match : data) { List<Object> line = new ArrayList<>(); for (Field field : Match.class.getDeclaredFields()) { field.setAccessible(true); Object value = field.get(match); line.add(value); } csvFilePrinter.printRecord(line); } } catch (IOException | IllegalAccessException e) { e.printStackTrace(); } finally { try { if (fileWriter != null) { fileWriter.flush(); fileWriter.close(); } if (csvFilePrinter != null) { csvFilePrinter.close(); } } catch (IOException e) { e.printStackTrace(); } } }
From source file:com.itemanalysis.jmetrik.file.JmetrikFileImporter.java
private void convertFile() { CSVParser parser = null;//from w w w .j a v a 2 s. c o m Reader reader = null; CSVPrinter printer = null; Writer writer = null; try { if (outputFile.exists()) { if (!overwrite) { theException = new IOException("File already exists and overwrite==false"); return; } } else { outputFile.createNewFile(); } //For debugging // System.out.println("CREATED: " + outputFile.getAbsolutePath()); //Writer header to file writer = new OutputStreamWriter(new FileOutputStream(outputFile)); printer = new CSVPrinter(writer, CSVFormat.DEFAULT.withCommentMarker('#')); printer.printComment("VERSION"); printer.printRecord(new String[] { "jmetrik1" }); printer.printComment("METADATA"); printer.printRecord(new String[] { Integer.valueOf(nrow).toString() }); printer.printComment("ATTRIBUTES"); for (VariableName v : variableAttributeMap.keySet()) { printer.printRecord(variableAttributeMap.get(v).getAttributeArray()); } printer.printComment("DATA"); //Write data to file reader = new InputStreamReader(new BOMInputStream(new FileInputStream(dataFile)), "UTF-8"); parser = new CSVParser(reader, dataFileFormat); if (hasHeader) { parser = new CSVParser(reader, dataFileFormat.withHeader(colNames).withSkipHeaderRecord(true)); } else { parser = new CSVParser(reader, dataFileFormat.withHeader(colNames)); } Iterator<CSVRecord> iter = parser.iterator(); CSVRecord csvRecord = null; VariableAttributes variableAttributes = null; DataType dataType = null; String temp = ""; while (iter.hasNext()) { csvRecord = iter.next(); for (VariableName v : variableAttributeMap.keySet()) { temp = csvRecord.get(v.toString()); variableAttributes = variableAttributeMap.get(v); dataType = variableAttributes.getDataType(); if (!variableAttributes.isMissing(temp)) { if (DataType.INTEGER == dataType) { printer.print(Double.valueOf(Double.parseDouble(temp)).intValue()); } else if (DataType.DOUBLE == dataType) { printer.print(Double.parseDouble(temp)); } else { printer.print(temp); } } else { 
printer.print(temp); } } printer.println(); } } catch (IOException ex) { theException = ex; } finally { try { if (parser != null) parser.close(); if (reader != null) reader.close(); if (printer != null) printer.close(); if (writer != null) writer.close(); } catch (IOException ex) { theException = ex; logger.fatal(ex); } } }
From source file:com.hurence.logisland.service.cache.CSVKeyValueCacheService.java
/**
 * Initializes the cache service by loading a CSV database from either an
 * HDFS URI ({@code DATABASE_FILE_URI}) or a local path
 * ({@code DATABASE_FILE_PATH}) into the key/value store, keyed by the
 * configured {@code ROW_KEY} column.
 * <p>
 * Note: if both URI and path are set, the path wins (its stream overwrites
 * the URI's). Headers come either from the {@code CSV_HEADER} property or
 * from the file's first record; otherwise initialization fails.
 *
 * @param context service configuration supplying file location, CSV format,
 *                headers, charset, and row-key column
 * @throws InitializationException if no source is configured, no headers
 *                                 can be determined, or loading fails
 */
@Override
// @OnEnabled
public void init(ControllerServiceInitializationContext context) throws InitializationException {
    super.init(context);
    try {
        // Resolve the database location: HDFS URI and/or local path.
        if (context.getPropertyValue(DATABASE_FILE_URI).isSet()) {
            dbUri = context.getPropertyValue(DATABASE_FILE_URI).asString();
        }
        if (context.getPropertyValue(DATABASE_FILE_PATH).isSet()) {
            dbPath = context.getPropertyValue(DATABASE_FILE_PATH).asString();
        }
        if ((dbUri == null) && (dbPath == null)) {
            throw new Exception(
                    "You must declare " + DATABASE_FILE_URI.getName() + " or " + DATABASE_FILE_PATH.getName());
        }
        InputStream is = null;
        if (dbUri != null) {
            logger.info("opening csv database from hdfs : " + dbUri);
            is = initFromUri(dbUri);
        }
        if (dbPath != null) {
            // NOTE(review): when both are set this silently replaces the URI
            // stream (leaking it) — local path takes precedence.
            logger.info("opening csv database from local fs : " + dbPath);
            is = initFromPath(context, dbPath);
        }
        if (is == null) {
            throw new InitializationException("Something went wrong while initializing csv db from "
                    + DATABASE_FILE_URI.getName() + " or " + DATABASE_FILE_PATH.getName());
        }

        // Map the CSV_FORMAT property onto a commons-csv CSVFormat preset.
        CSVFormat format = CSVFormat.DEFAULT;
        if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_EXCEL.getValue())) {
            format = CSVFormat.EXCEL;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_EXCEL_FR.getValue())) {
            // French Excel flavor: semicolon-delimited.
            format = CSVFormat.EXCEL.withDelimiter(';');
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_MYSQL.getValue())) {
            format = CSVFormat.MYSQL;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_RFC4180.getValue())) {
            format = CSVFormat.RFC4180;
        } else if (context.getPropertyValue(CSV_FORMAT).asString().equals(CSV_TDF.getValue())) {
            format = CSVFormat.TDF;
        }

        // Header resolution: explicit comma-separated list, or first record.
        if (context.getPropertyValue(CSV_HEADER).isSet()) {
            String[] columnNames = context.getPropertyValue(CSV_HEADER).asString().split(",");
            for (String name : columnNames) {
                // All explicitly declared columns are typed as strings.
                headers.get().put(name, "string");
            }
            format = format.withHeader(columnNames);
        } else if (context.getPropertyValue(FIRST_LINE_HEADER).isSet()) {
            format = format.withFirstRecordAsHeader();
        } else {
            throw new InitializationException("unable to get headers from somewhere");
        }

        // Charset defaults to UTF-8 unless overridden by configuration.
        Charset charset = Charset.forName("UTF-8");
        if (context.getPropertyValue(ENCODING_CHARSET).isSet()) {
            String encoding = context.getPropertyValue(ENCODING_CHARSET).asString();
            charset = Charset.forName(encoding);
        }

        rowKey = context.getPropertyValue(ROW_KEY).asString();
        CSVParser parser = CSVParser.parse(is, charset, format);

        // Load every record into the cache, keyed by the row-key column.
        long count = 0;
        try {
            final Set<String> columnNames = parser.getHeaderMap().keySet();
            for (final CSVRecord record : parser) {
                Record logislandRecord = new StandardRecord();
                for (final String column : columnNames) {
                    logislandRecord.setStringField(column, record.get(column));
                }
                set(logislandRecord.getField(rowKey).asString(), logislandRecord);
                count++;
            }
        } finally {
            // Logged even on partial failure so the loaded count is visible.
            logger.info("successfully loaded " + count + " records from CSV file");
            parser.close();
            is.close();
        }
    } catch (Exception e) {
        getLogger().error("Could not load database file: {}", new Object[] { e.getMessage() });
        throw new InitializationException(e);
    }
}
From source file:com.team3637.service.TeamServiceMySQLImpl.java
/**
 * Imports teams from a CSV file. Expected columns: 0=id, 1=team number,
 * 2=average score, 3=match count, 4=bracketed comma-separated tag list
 * (e.g. {@code [a, b]}). Existing teams (by team number) are updated;
 * new ones are created.
 * <p>
 * NOTE(review): the file is decoded with the platform default charset,
 * as in the original — confirm whether UTF-8 should be forced.
 *
 * @param inputFile path of the CSV file to read
 */
@Override
public void importCSV(String inputFile) {
    try {
        String csvData = new String(Files.readAllBytes(FileSystems.getDefault().getPath(inputFile)));
        // Normalize Windows line endings so the record separator is just "\n".
        csvData = csvData.replaceAll("\\r", "");
        // FIX: the original never closed the CSVParser (resource leak).
        try (CSVParser parser = CSVParser.parse(csvData, CSVFormat.DEFAULT.withRecordSeparator("\n"))) {
            for (CSVRecord record : parser) {
                Team team = new Team();
                team.setId(Integer.parseInt(record.get(0)));
                team.setTeam(Integer.parseInt(record.get(1)));
                team.setAvgscore(Double.parseDouble(record.get(2)));
                team.setMatches(Integer.parseInt(record.get(3)));
                // Column 4 looks like "[tag1, tag2]": strip brackets, split, trim.
                String[] tags = record.get(4).substring(1, record.get(4).length() - 1).split(",");
                for (int i = 0; i < tags.length; i++) {
                    tags[i] = tags[i].trim();
                }
                if (tags.length > 0 && !tags[0].equals("")) {
                    team.setTags(Arrays.asList(tags));
                } else {
                    // "[]" splits to a single empty string — treat as no tags.
                    team.setTags(new ArrayList<String>());
                }
                if (checkForTeam(team.getTeam())) {
                    update(team);
                } else {
                    create(team);
                }
            }
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
From source file:edu.caltech.ipac.firefly.server.util.ipactable.DataGroupReader.java
public static Format guessFormat(File inf) throws IOException { String fileExt = FileUtil.getExtension(inf); if (fileExt != null) { if (fileExt.equalsIgnoreCase("tbl")) { return Format.IPACTABLE; } else if (fileExt.equalsIgnoreCase("csv")) { return Format.CSV; } else if (fileExt.equalsIgnoreCase("tsv")) { return Format.TSV; } else if (fileExt.equalsIgnoreCase("fits")) { return Format.FITS; } else if (fileExt.equalsIgnoreCase("json")) { return Format.JSON; }/*from www .j av a2 s .c om*/ } int readAhead = 10; int row = 0; BufferedReader reader = new BufferedReader(new FileReader(inf), IpacTableUtil.FILE_IO_BUFFER_SIZE); try { String line = reader.readLine(); if (line.startsWith("{")) { return Format.JSON; } int[][] counts = new int[readAhead][2]; int csvIdx = 0, tsvIdx = 1; while (line != null && row < readAhead) { if (line.startsWith("|") || line.startsWith("\\")) { return Format.IPACTABLE; } else if (line.startsWith("COORD_SYSTEM: ") || line.startsWith("EQUINOX: ") || line.startsWith("NAME-RESOLVER: ")) { //NOTE: a fixed targets file contains the following lines at the beginning: //COORD_SYSTEM: xxx //EQUINOX: xxx //NAME-RESOLVER: xxx return Format.FIXEDTARGETS; } counts[row][csvIdx] = CSVFormat.DEFAULT.parse(new StringReader(line)).iterator().next().size(); counts[row][tsvIdx] = CSVFormat.TDF.parse(new StringReader(line)).iterator().next().size(); row++; line = reader.readLine(); } // check csv int c = counts[0][csvIdx]; boolean cMatch = true; for (int i = 1; i < row; i++) { cMatch = cMatch && counts[i][csvIdx] == c; } // check tsv int t = counts[0][tsvIdx]; boolean tMatch = true; for (int i = 1; i < row; i++) { tMatch = tMatch && counts[i][tsvIdx] == t; } if (cMatch && tMatch) { if (t > c) { return Format.TSV; } else { return Format.CSV; } } else { if (cMatch) { return Format.CSV; } else if (tMatch) { return Format.TSV; } else { return Format.UNKNOWN; } } } finally { try { reader.close(); } catch (Exception e) { e.printStackTrace(); } } }