Example usage for org.apache.commons.csv CSVParser getHeaderMap

List of usage examples for org.apache.commons.csv CSVParser getHeaderMap

Introduction

On this page you can find example usage for org.apache.commons.csv CSVParser getHeaderMap.

Prototype

public Map<String, Integer> getHeaderMap() 

Document

Returns a copy of the header map that iterates in column order.
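For orientation, here is a minimal, self-contained sketch of this method; the class name HeaderMapExample and the inline sample data are made up for illustration. It shows that the returned map associates each column name with its 0-based index and iterates in column order.

import java.io.Reader;
import java.io.StringReader;
import java.util.Map;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;

public class HeaderMapExample {
    public static void main(String[] args) throws Exception {
        // Sample data made up for this sketch.
        Reader in = new StringReader("name,email,age\nAlice,alice@example.com,30\n");
        try (CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withHeader())) {
            // Keys are column names, values are 0-based column indices;
            // iteration follows column order.
            for (Map.Entry<String, Integer> entry : parser.getHeaderMap().entrySet()) {
                System.out.println(entry.getKey() + " -> " + entry.getValue());
            }
            // Prints: name -> 0, email -> 1, age -> 2
        }
    }
}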

Usage

From source file:mtsar.api.csv.WorkerCSV.java

public static Iterator<Worker> parse(Stage stage, CSVParser csv) {
    final Set<String> header = csv.getHeaderMap().keySet();
    checkArgument(!Sets.intersection(header, Sets.newHashSet(HEADER)).isEmpty(), "Unknown CSV header: %s",
            String.join(",", header));

    return StreamSupport.stream(csv.spliterator(), false).map(row -> {
        final String id = row.isSet("id") ? row.get("id") : null;
        final String[] tags = row.isSet("tags") && !StringUtils.isEmpty(row.get("tags"))
                ? row.get("tags").split("\\|")
                : new String[0];
        final String datetime = row.isSet("datetime") ? row.get("datetime") : null;

        return new Worker.Builder().setId(StringUtils.isEmpty(id) ? null : Integer.valueOf(id))
                .setStage(stage.getId()).addAllTags(Arrays.asList(tags))
                .setDateTime(new Timestamp(StringUtils.isEmpty(datetime) ? System.currentTimeMillis()
                        : Long.parseLong(datetime) * 1000L))
                .build();
    }).iterator();
}

From source file:mtsar.api.csv.TaskCSV.java

public static Iterator<Task> parse(Stage stage, CSVParser csv) {
    final Set<String> header = csv.getHeaderMap().keySet();
    checkArgument(!Sets.intersection(header, Sets.newHashSet(HEADER)).isEmpty(), "Unknown CSV header: %s",
            String.join(",", header));

    return StreamSupport.stream(csv.spliterator(), false).map(row -> {
        final String id = row.isSet("id") ? row.get("id") : null;
        final String[] tags = row.isSet("tags") && !StringUtils.isEmpty(row.get("tags"))
                ? row.get("tags").split("\\|")
                : new String[0];
        final String type = row.get("type");
        final String description = row.isSet("description") ? row.get("description") : null;
        final String[] answers = row.isSet("answers") && !StringUtils.isEmpty(row.get("answers"))
                ? row.get("answers").split("\\|")
                : new String[0];
        final String datetime = row.isSet("datetime") ? row.get("datetime") : null;

        return new Task.Builder().setId(StringUtils.isEmpty(id) ? null : Integer.valueOf(id))
                .setStage(stage.getId()).addAllTags(Arrays.asList(tags))
                .setDateTime(new Timestamp(StringUtils.isEmpty(datetime) ? System.currentTimeMillis()
                        : Long.parseLong(datetime) * 1000L))
                .setType(StringUtils.defaultIfEmpty(type, TaskDAO.TASK_TYPE_SINGLE)).setDescription(description)
                .addAllAnswers(Arrays.asList(answers)).build();
    }).iterator();
}

From source file:mtsar.api.csv.AnswerCSV.java

public static Iterator<Answer> parse(Stage stage, CSVParser csv) {
    final Set<String> header = csv.getHeaderMap().keySet();
    checkArgument(!Sets.intersection(header, Sets.newHashSet(HEADER)).isEmpty(), "Unknown CSV header: %s",
            String.join(",", header));

    return StreamSupport.stream(csv.spliterator(), false).map(row -> {
        final String id = row.isSet("id") ? row.get("id") : null;
        final String[] tags = row.isSet("tags") && !StringUtils.isEmpty(row.get("tags"))
                ? row.get("tags").split("\\|")
                : new String[0];
        final String type = row.isSet("type") ? row.get("type") : null;
        final String workerId = row.get("worker_id");
        final String taskId = row.get("task_id");
        final String[] answers = row.isSet("answers") && !StringUtils.isEmpty(row.get("answers"))
                ? row.get("answers").split("\\|")
                : new String[0];
        final String datetime = row.isSet("datetime") ? row.get("datetime") : null;

        return new Answer.Builder().setId(StringUtils.isEmpty(id) ? null : Integer.valueOf(id))
                .setStage(stage.getId()).addAllTags(Arrays.asList(tags))
                .setDateTime(new Timestamp(StringUtils.isEmpty(datetime) ? System.currentTimeMillis()
                        : Long.parseLong(datetime) * 1000L))
                .setType(StringUtils.defaultIfEmpty(type, AnswerDAO.ANSWER_TYPE_DEFAULT))
                .setWorkerId(Integer.valueOf(workerId)).setTaskId(Integer.valueOf(taskId))
                .addAllAnswers(Arrays.asList(answers)).build();
    }).iterator();
}

From source file:net.iaeste.iws.core.services.ExchangeCSVService.java

private static Set<String> readHeader(final CSVParser parser) {
    final Map<String, Integer> map = parser.getHeaderMap();

    if (map == null) {
        throw new IWSException(IWSErrors.CSV_HEADER_ERROR, "The CSV did not have a valid header.");
    }

    return map.keySet();
}

From source file:com.fbartnitzek.tasteemall.data.csv.CsvFileReader.java

/**
 * Reads a CSV file into headers and data rows, using the RFC 4180 CSV format
 * (comma-separated values with double-quote escaping).
 * @param file the file to read
 * @param headers list that receives the column names read from the file header
 * @return the data rows
 */
public static List<List<String>> readCsvFileHeadingAndData(File file, List<String> headers) {

    List<List<String>> data = new ArrayList<>();

    CSVParser csvParser = null;
    Reader csvReader = null;
    try {
        csvReader = new FileReader(file);
        csvParser = new CSVParser(csvReader, CsvFileWriter.CSV_FORMAT_RFC4180.withHeader());
        Map<String, Integer> headerMap = csvParser.getHeaderMap();

        // Print the header map (index: name).
        for (Map.Entry<String, Integer> entry : headerMap.entrySet()) {
            System.out.println(entry.getValue() + ": " + entry.getKey());
        }

        // Collect the column names in header order; the header map iterates
        // in column order, so the list preserves the file's column order.
        headers.addAll(headerMap.keySet());
        for (CSVRecord record : csvParser) {
            List<String> dataEntry = new ArrayList<>();
            for (int i = 0; i < headers.size(); ++i) {
                if (i < record.size()) {
                    dataEntry.add(record.get(i));
                }
            }
            data.add(dataEntry);
        }

    } catch (IOException e) {
        e.printStackTrace();
    } finally {
        try {
            if (csvReader != null) {
                csvReader.close();
            }
            if (csvParser != null) {
                csvParser.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    return data;
}

From source file:com.ggvaidya.scinames.model.Dataset.java

/**
 * Load this dataset from a CSV file. We load the entire CSV file, except
 * for blank cells.
 * 
 * @param csvFormat The CSV format of the input file.
 * @param csvFile The input file to load.
 * @return The resulting Dataset.
 * @throws IOException if the file cannot be read.
 */
public static Dataset fromCSV(CSVFormat csvFormat, File csvFile) throws IOException {
    Dataset dataset = new Dataset(csvFile.getName(), new SimplifiedDate(), Dataset.TYPE_CHECKLIST);

    // Get ready to filter input files.
    InputStream ins = new FileInputStream(csvFile);

    // Look for BOMs and discard!
    ins = new BOMInputStream(ins, false);

    // Convert into a Reader.
    Reader reader = new BufferedReader(new InputStreamReader(ins));

    // Load CSV
    CSVParser parser = csvFormat.withHeader().parse(reader);
    Map<String, Integer> headerMap = parser.getHeaderMap();

    // Sort the columns by their 0-based index so they appear in file order.
    dataset.setColumns(headerMap.entrySet().stream().sorted(Map.Entry.comparingByValue())
            .map(Map.Entry::getKey).map(DatasetColumn::of).collect(Collectors.toList()));

    dataset.rows.clear();
    dataset.rows.addAll(parser.getRecords().stream().map(record -> {
        DatasetRow row = new DatasetRow(dataset);
        row.putAll(record.toMap());
        return row;
    }).collect(Collectors.toList()));

    return dataset;
}

From source file:com.ibm.util.merge.directive.provider.ProviderCsv.java

/**
 * Retrieve the data (via the superclass HTTP provider) and parse it as CSV
 * @param rtc the merge context
 */
@Override
public void getData(MergeContext rtc) throws MergeException {
    // Get the data
    super.getData(rtc);

    DataTable newTable = new DataTable();
    CSVParser parser;
    try {
        parser = new CSVParser(new StringReader(getFetchedData()), CSVFormat.EXCEL.withHeader());
        for (String colName : parser.getHeaderMap().keySet()) {
            newTable.addCol(colName);
        }
        for (CSVRecord record : parser) {
            ArrayList<String> row = newTable.addNewRow();
            for (String field : record) {
                row.add(field);
            }
        }
        parser.close();
    } catch (IOException e) {
        throw new MergeException(this, e, "CSV Parser Stringreader IO Exception", getFetchedData());
    }
    if (newTable.size() > 0) {
        getTables().add(newTable);
    }
}

From source file:ch.eitchnet.csvrestendpoint.marshaller.CsvDataToHeaderJsonMarshaller.java

@Override
public JsonObject marshall(CSVParser csvParser) {

    JsonObject root = new JsonObject();
    root.addProperty("msg", "-");

    JsonArray data = new JsonArray();

    Set<Entry<String, Integer>> header = csvParser.getHeaderMap().entrySet();
    for (Entry<String, Integer> entry : header) {
        data.add(new JsonPrimitive(entry.getKey()));
    }

    root.add("data", data);

    return root;
}

From source file:com.danidemi.templategeneratormavenplugin.generation.impl.CsvRowSource.java

@Override
public Iterator<IRowModel> iterator() {

    try {
        // get the reader from the resource
        CSVParser parser = CSVFormat.RFC4180.withFirstRecordAsHeader().parse(reader);

        // get the headers
        List<String> headersAsList = new ArrayList<>(parser.getHeaderMap().keySet());

        return new TransformIteratorAdapter<CSVRecord, IRowModel>(parser.iterator(),
                r -> new CsvRowModel(r, headersAsList));
    } catch (IOException e) {
        throw new RuntimeException(e);
    }

}

From source file:io.mindmaps.migration.csv.CSVSchemaMigrator.java

/**
 * Migrate a CSV parser as an entity type
 * @param entityType user-provided type of the entity representing the table
 * @param parser the parser to migrate
 * @return var patterns representing the resource and the entity
 */
public Collection<Var> migrateEntitySchema(String entityType, CSVParser parser) {
    Var type = var().isa("entity-type").id(entityType);

    Collection<Var> collection = Lists.newArrayList(type);

    Map<String, Integer> headers = parser.getHeaderMap();
    headers.keySet().stream().map(header -> migrateAsResource(entityType, header)).forEach(collection::addAll);

    return collection;
}