Example usage for com.fasterxml.jackson.dataformat.csv CsvMapper CsvMapper

Introduction

This page collects example usages of the com.fasterxml.jackson.dataformat.csv.CsvMapper default constructor, CsvMapper().

Prototype

public CsvMapper() 
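
The no-argument constructor creates a mapper with default CSV settings; a schema is then supplied per read or write. A minimal, self-contained sketch (the input string and column names here are hypothetical, not taken from the examples below):

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;

import java.util.Map;

public class CsvMapperSketch {
    public static void main(String[] args) throws Exception {
        CsvMapper mapper = new CsvMapper();
        // Hypothetical input; the first row is treated as the header.
        String csv = "name,age\nalice,30\nbob,25\n";
        CsvSchema schema = CsvSchema.emptySchema().withHeader();
        MappingIterator<Map<String, String>> rows =
                mapper.readerFor(Map.class).with(schema).readValues(csv);
        while (rows.hasNextValue()) {
            System.out.println(rows.nextValue()); // {name=alice, age=30}, then {name=bob, age=25}
        }
    }
}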

Usage

From source file:org.jberet.support.io.JacksonCsvItemReaderWriterBase.java

@Override
protected void initJsonFactory() throws Exception {
    if (jsonFactoryLookup != null) {
        jsonFactory = InitialContext.doLookup(jsonFactoryLookup);
    } else {
        jsonFactory = new CsvFactory(new CsvMapper());
    }
}
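
Here the CsvMapper is passed to the CsvFactory constructor so that parsers and generators created by the factory carry a codec capable of data binding. A minimal sketch of using such a factory directly (the one-line input is hypothetical):

CsvFactory factory = new CsvFactory(new CsvMapper());
// Parsers created by this factory use the CsvMapper as their codec,
// so readValueAs(...)-style binding works on them.
JsonParser parser = factory.createParser("a,b\n"); // hypothetical one-line input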

From source file:nl.esciencecenter.medim.dicom.types.DicomTags.java

protected void readFromText(String txt) throws IOException {
    // Pass I: remove comments including the ending newline!
    Pattern pat = Pattern.compile("^#.*\n", Pattern.MULTILINE);
    String newTxt = pat.matcher(txt).replaceAll("");
    // Not needed: Pass II: remove empty lines as a result of the
    // pat=Pattern.compile("\n\n",Pattern.MULTILINE);
    // newTxt=pat.matcher(newTxt).replaceAll("");

    // ObjectMapper mapper=new ObjectMapper();
    CsvMapper mapper = new CsvMapper();
    CsvSchema schema = mapper.schemaFor(CsvTagLine.class); // create object mapping from CsvLine.class

    // CsvSchema schema = CsvSchema.builder()
    // .addColumn(CSV_GROUP)
    // .addColumn(CSV_ELEMENT)
    // .addColumn(CSV_VR)
    // .addColumn(CSV_NAME)
    // .build();

    MappingIterator<CsvTagLine> mi = mapper.reader(CsvTagLine.class).with(schema).readValues(newTxt);

    List<TagDirective> tags = new ArrayList<TagDirective>();

    // skip first:
    CsvTagLine header = mi.nextValue();

    // check header values.
    while (mi.hasNextValue()) {
        CsvTagLine line = mi.nextValue();
        TagDirective tag = new TagDirective();
        // do something?
        tag.tagNr = StringUtil.parseHexidecimal(line.group) * 0x10000
                + StringUtil.parseHexidecimal(line.element);
        tag.name = line.name;
        line.keep = StringUtil.stripWhiteSpace(line.keep);
        line.options = StringUtil.stripWhiteSpace(line.options);

        // Support OX
        if (StringUtil.equalsIgnoreCase(line.VR, "OX"))
            line.VR = "OB"; // treat as bytes;

        VRType vrType = VRType.valueOf(line.VR);
        tag.vr = vrType.vr();

        boolean keep = false;

        if (StringUtil.isWhiteSpace(line.keep) == false)
            keep = (Integer.parseInt(line.keep) > 0);

        if (keep == false) {
            tag.option = TagProcessingOption.DELETE;
        } else {
            // check option:
            // System.err.printf("- %s | %s | %s | %s\n",line.group,line.element,line.keep,line.options);
            if (StringUtil.isWhiteSpace(line.options) == false) {
                tag.option = TagProcessingOption.valueOfOrNull(line.options, true);
                // error parsing option:
                if (tag.option == null) {
                    throw new IOException("Parse Error: could not parse Tag Option:" + line.options);
                }
            } else {
                tag.option = TagProcessingOption.KEEP; // no option -> keep.
            }
        }

        tags.add(tag);
    }

    // POST: check tags:

    for (int i = 0; i < tags.size(); i++) {
        TagDirective tag = tags.get(i);
        // logger.debugPrintf("TagOption: 0x%8x '%s' : %s\n",tag.tagNr,tag.name,tag.option);
        this.dicomTags.put(tag.tagNr, tag); // register
    }
}
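
Note that ObjectMapper.reader(Class), as used above, was deprecated in later Jackson 2.x releases in favour of readerFor(Class). A sketch of the equivalent call, assuming the same CsvTagLine POJO and schema:

CsvMapper mapper = new CsvMapper();
CsvSchema schema = mapper.schemaFor(CsvTagLine.class);
MappingIterator<CsvTagLine> mi = mapper.readerFor(CsvTagLine.class).with(schema).readValues(newTxt);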

From source file:nl.esciencecenter.ptk.csv.CSVData.java

public void parseText(String csvText) throws IOException {
    // Extended CSV !
    // Pass I: remove comments including the ending newline!
    Pattern pat = Pattern.compile("^#.*\n", Pattern.MULTILINE);
    csvText = pat.matcher(csvText).replaceAll("");

    // todo: check how jackson can parse alternative field separators;
    if (fieldSeparators != null) {
        // csvText=csvText.replaceAll(",","_");

        for (String sep : fieldSeparators) {
            // lazy replace
            csvText = csvText.replaceAll(sep, ",");
        }
    }

    // Not needed: Pass II: remove empty lines as a result of the
    // pat=Pattern.compile("\n\n",Pattern.MULTILINE);
    // newTxt=pat.matcher(newTxt).replaceAll("");

    // ObjectMapper mapper=new ObjectMapper();
    CsvMapper mapper = new CsvMapper();
    mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);

    MappingIterator<Object[]> it = mapper.reader(Object[].class).readValues(csvText);

    if (it.hasNext() == false) {
        throw new IOException("Empty text or csv text contains no headers!");
    }

    // read header:
    Object headers[] = it.next();

    StringList list = new StringList();
    for (int i = 0; i < headers.length; i++) {
        list.add(headers[i].toString());
    }

    logger.debugPrintf("Headers=%s\n", list.toString("<>"));
    headerList = list;

    data = new ArrayList<String[]>();

    // check header values.
    while (it.hasNext()) {
        Object line[] = it.next();
        String row[] = new String[line.length];

        for (int j = 0; j < line.length; j++) {
            Object value = line[j];
            if (value != null) {
                row[j] = value.toString();
            }
        }
        data.add(row);
    }

    logger.debugPrintf("Read %d number of rows\n", data.size());
}
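
Enabling CsvParser.Feature.WRAP_AS_ARRAY makes the parser expose each row as a JSON-style array, which is what allows the schemaless binding to Object[] above. A minimal sketch with a hypothetical two-row input:

CsvMapper mapper = new CsvMapper();
mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
MappingIterator<String[]> rows = mapper.readerFor(String[].class).readValues("a,b\n1,2\n");
while (rows.hasNext()) {
    String[] row = rows.next(); // {"a","b"}, then {"1","2"}
}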

From source file:org.opentestsystem.ap.ivs.service.ValidationUtility.java

public List<ErrorReport> parseErrorReport(final Path reportFolder) {
    final Path errorFilePath = reportFolder.resolve(this.ivsProperties.getErrorReportFileName());
    try {
        final MappingIterator<ErrorReport> results = new CsvMapper().readerWithTypedSchemaFor(ErrorReport.class)
                .readValues(errorFilePath.toFile());
        return results.readAll();
    } catch (IOException e) {
        throw new SystemException("Error converting item history list to CSV", e);
    }
}
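
readerWithTypedSchemaFor(ErrorReport.class) builds a typed CsvSchema from the POJO's properties and attaches it to the reader in one call. A sketch of a more explicit equivalent, assuming ErrorReport is a plain bean:

CsvMapper mapper = new CsvMapper();
CsvSchema typedSchema = mapper.typedSchemaFor(ErrorReport.class);
MappingIterator<ErrorReport> results =
        mapper.readerFor(ErrorReport.class).with(typedSchema).readValues(errorFilePath.toFile());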

From source file:com.couchbase.devex.CSVConfig.java

@Override
public Observable<Document> startImport() {
    FileInputStream csvFile;
    try {
        csvFile = new FileInputStream(getCsvFilePath());
        CsvMapper mapper = new CsvMapper();
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
        CsvSchema csvSchema = CsvSchema.emptySchema().withColumnSeparator(getColumnSeparator())
                .withQuoteChar(getQuoteChar());
        ObjectReader reader = mapper.reader(String[].class);
        MappingIterator<String[]> it = reader.with(csvSchema).readValues(csvFile);
        if (!getSkipFirstLineForNames()) {
            String[] firstline = it.next();
            updateColumnNames(firstline);
        }
        return Observable.from(new Iterable<String[]>() {
            @Override
            public Iterator<String[]> iterator() {
                return it;
            }
        }).flatMap(line -> createNode(line));
    } catch (FileNotFoundException e) {
        return Observable.error(e);
    } catch (IOException e) {
        return Observable.error(e);
    }
}
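
The schema in this example only customizes the separator and quote character; the actual column names come from the first line via updateColumnNames. A minimal sketch of reading semicolon-separated, single-quoted input (the sample string is hypothetical):

CsvMapper mapper = new CsvMapper();
mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
CsvSchema schema = CsvSchema.emptySchema().withColumnSeparator(';').withQuoteChar('\'');
MappingIterator<String[]> it = mapper.readerFor(String[].class).with(schema).readValues("'a';'b'\n");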

From source file:datadidit.helpful.hints.processors.csv.converter.ConvertCSVToJSON.java

public List<Map<?, ?>> readObjectsFromCsv(String fileContent) throws JsonProcessingException, IOException {
    CsvMapper csvMapper = new CsvMapper();
    MappingIterator<Map<?, ?>> mappingIterator = csvMapper.readerFor(Map.class).with(schema)
            .readValues(fileContent);

    return this.fixMap(mappingIterator.readAll());
}
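
readerFor(Map.class).with(schema) binds each row to a Map keyed by the schema's column names; the schema field itself is defined elsewhere in this processor. A minimal sketch, assuming a header-based schema:

CsvMapper csvMapper = new CsvMapper();
CsvSchema schema = CsvSchema.emptySchema().withHeader(); // assumption: the header row names the columns
MappingIterator<Map<?, ?>> it = csvMapper.readerFor(Map.class).with(schema).readValues("x,y\n1,2\n");
List<Map<?, ?>> rows = it.readAll(); // [{x=1, y=2}]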

From source file:com.datafibers.kafka.connect.SchemaedFileSourceTask.java

private List<SourceRecord> pollFromFile() throws InterruptedException {
    log.trace("pollFromFile");
    CsvSchema bootstrapCsv;
    CsvMapper csvMapper = new CsvMapper();
    ObjectMapper jsonMapper = new ObjectMapper();
    MappingIterator<Map<?, ?>> mappingIterator;
    ArrayList<SourceRecord> records = null;
    long currentTime = System.currentTimeMillis();
    long recordsPerPoll;

    // TODO: Improve ExceptionOnEof logic.
    // The code below only works when each pass through
    // poll() reads all available records (not a given).
    if (config.getExceptionOnEof() && streamOffset != null) {
        throw new ConnectException("No more data available on FileInputStream");
    }

    // Initialize the bootstrapCsv schema if necessary
    if (recordSchema == null || inputType.equalsIgnoreCase("json")) {
        log.trace("Constructing csvSchema from emptySchema");
        bootstrapCsv = config.getCsvHeaders() ? CsvSchema.emptySchema().withHeader()
                : CsvSchema.emptySchema().withoutHeader();
    } else {
        // We've seen a schema, so we'll assume headers from the recordSchema
        log.trace("Constructing csvSchema from recordSchema");
        CsvSchema.Builder builder = new CsvSchema.Builder();
        builder.setUseHeader(false);
        builder.setColumnSeparator(',');
        for (Field f : recordSchema.fields()) {
            log.trace("adding column {}", f.name());
            builder.addColumn(f.name());
        }
        bootstrapCsv = builder.build();
    }
    try {
        if (stream == null)
            openFileStream();
        if (reader == null)
            reader = new BufferedReader(new InputStreamReader(stream));

        if (inputType.equalsIgnoreCase("json")) {
            mappingIterator = jsonMapper.readerFor(Map.class).readValues(reader);
        } else if (inputType.equalsIgnoreCase("csv")) {
            mappingIterator = csvMapper.readerWithSchemaFor(Map.class).with(bootstrapCsv).readValues(reader);
        } else {
            log.error("Unsupported file input type specified ({})", inputType);
            return null;
        }
    } catch (FileNotFoundException fnf) {
        log.warn("Couldn't find file {} for SchemaedFileSourceTask, sleeping to wait for it to be created",
                logFilename());
        synchronized (this) {
            this.wait(1000);
        }
        return null;
    } catch (IOException e) {
        // IOException thrown when no more records in stream
        log.warn("Processed all available data from {}; sleeping to wait additional records", logFilename());
        // Close reader and stream; swallowing exceptions ... we're about to throw a Retry
        try {
            reader.close();
        } catch (Exception nested) {
        } finally {
            reader = null;
        }

        if (stream != System.in) {
            try {
                stream.close();
            } catch (Exception nested) {
            } finally {
                stream = null;
            }
        }

        synchronized (this) {
            this.wait(1000);
        }
        return null;
    }
    log.debug("mappingIterator of type {} created; begin reading data file",
            mappingIterator.getClass().toString());

    // The csvMapper class is really screwy; can't figure out why it
    // won't return a rational Schema ... so we'll extract it from
    // the first object later.
    if (recordSchema == null && inputType.equalsIgnoreCase("csv") && csvMapper.schema().size() > 0) {
        recordSchema = ConvertMappingSchema(csvMapper.schemaWithHeader());
        log.trace("recordSchema created from csvMapper; type {}", recordSchema.type().toString());
    }
    try {
        FileInputStream fstream = (FileInputStream) stream;
        Long lastElementOffset = streamOffset;
        recordsPerPoll = 3;

        while (mappingIterator.hasNext()) {
            Map<?, ?> element = mappingIterator.next();
            Long elementOffset, iteratorOffset;
            recordCount++;
            recordsPerPoll--;

            iteratorOffset = mappingIterator.getCurrentLocation().getByteOffset(); // never works !!!
            if (iteratorOffset < 0) {
                // The stream channel will CLOSE on the last clean record
                // seen by the mappingIterator, so we have to be careful here.
                // Additionally, when parsing CSV files, there seems to be a
                // lot of Bad File Descriptor errors; ignore them.
                try {
                    elementOffset = fstream.getChannel().position();
                } catch (java.nio.channels.ClosedChannelException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                } catch (IOException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                }
            } else {
                log.trace("mappingIterator.getCurrentLocation() returns {}", iteratorOffset.toString());
                elementOffset = iteratorOffset;
            }
            log.trace("Next input record: {} (class {}) from file position {}", element.toString(),
                    element.getClass().toString(), elementOffset.toString());

            if (recordSchema == null) {
                recordSchema = ConvertMappingSchema(element.keySet());
                log.trace("recordSchema created from element; type {}", recordSchema.type().toString());
            }

            if (records == null)
                records = new ArrayList<>();
            records.add(new SourceRecord(offsetKey(filename), offsetValue(elementOffset), topic, recordSchema,
                    ConvertMappingElement(recordSchema, (HashMap<?, ?>) element)));
            streamOffset = lastElementOffset = elementOffset;
        }
    } catch (Exception e) {
        throw new ConnectException(e);
    }

    lastPollTime = currentTime;
    return records;
}
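
The builder-driven schema above names columns explicitly so that headerless CSV rows can still be bound to Maps. A compact sketch of the same pattern with two hypothetical columns:

CsvSchema.Builder builder = new CsvSchema.Builder();
builder.setUseHeader(false);
builder.setColumnSeparator(',');
builder.addColumn("id");
builder.addColumn("value");
CsvSchema schema = builder.build();
MappingIterator<Map<?, ?>> it =
        new CsvMapper().readerFor(Map.class).with(schema).readValues("1,foo\n2,bar\n");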

From source file:com.marklogic.client.test.JacksonDatabindTest.java

/** Demonstrate using Jackson's CSV mapper directly to simplify reading in data, populating a 
 * third-party pojo (one we cannot annotate) then writing it out
 * via JacksonDatabindHandle with configuration provided by mix-in annotations.
 **/
@Test
public void testDatabindingThirdPartyPojoWithMixinAnnotations() throws JsonProcessingException, IOException {
    CsvSchema schema = CsvSchema.builder().setColumnSeparator('\t').addColumn("geoNameId").addColumn("name")
            .addColumn("asciiName").addColumn("alternateNames")
            .addColumn("latitude", CsvSchema.ColumnType.NUMBER)
            .addColumn("longitude", CsvSchema.ColumnType.NUMBER).addColumn("featureClass")
            .addColumn("featureCode").addColumn("countryCode").addColumn("countryCode2").addColumn("adminCode1")
            .addColumn("adminCode2").addColumn("adminCode3").addColumn("adminCode4").addColumn("population")
            .addColumn("elevation", CsvSchema.ColumnType.NUMBER).addColumn("dem", CsvSchema.ColumnType.NUMBER)
            .addColumn("timezoneCode").addColumn("lastModified").build();
    CsvMapper mapper = new CsvMapper();
    mapper.addMixInAnnotations(Toponym.class, ToponymMixIn1.class);
    ObjectReader reader = mapper.reader(Toponym.class).with(schema);
    BufferedReader cityReader = new BufferedReader(Common.testFileToReader(CITIES_FILE));
    GenericDocumentManager docMgr = Common.client.newDocumentManager();
    DocumentWriteSet set = docMgr.newWriteSet();
    String line = null;
    for (int numWritten = 0; numWritten < MAX_TO_WRITE
            && (line = cityReader.readLine()) != null; numWritten++) {
        Toponym city = reader.readValue(line);
        JacksonDatabindHandle handle = new JacksonDatabindHandle(city);
        handle.getMapper().addMixInAnnotations(Toponym.class, ToponymMixIn2.class);
        set.add(DIRECTORY + "/thirdPartyJsonCities/" + city.getGeoNameId() + ".json", handle);
    }
    docMgr.write(set);
    cityReader.close();
    // we can add assertions later, for now this test just serves as example code and 
    // ensures no exceptions are thrown
}
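
addMixInAnnotations was deprecated in later Jackson 2.x releases in favour of addMixIn, and reader(Class) has the newer equivalent readerFor(Class). A sketch of the updated calls, assuming the same Toponym class and schema:

CsvMapper mapper = new CsvMapper();
mapper.addMixIn(Toponym.class, ToponymMixIn1.class); // same effect as addMixInAnnotations above
ObjectReader reader = mapper.readerFor(Toponym.class).with(schema);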

From source file:edu.cmu.cs.lti.discoursedb.annotation.lightside.io.LightSideService.java

@Transactional(readOnly = true)
private String generateLightSideOutput(List<RawDataInstance> data) throws JsonProcessingException {
    StringBuilder output = new StringBuilder();
    CsvMapper mapper = new CsvMapper();

    //generate list of binary label types
    Set<String> binaryLabelTypes = data.stream().parallel()
            .flatMap(instance -> instance.getAnnotations().entrySet().stream())
            .filter(m -> m.getValue().toLowerCase().equals(LABEL_ASSIGNED_VAL))
            .map(m -> m.getKey().toLowerCase()).collect(Collectors.toSet());

    //generate header
    Set<String> types = data.stream().parallel()
            .flatMap(instance -> instance.getAnnotations().entrySet().stream())
            .map(m -> m.getKey().toLowerCase()).collect(Collectors.toSet());

    Assert.isTrue(!types.contains(TEXT_COL), "No feature with the name \"" + TEXT_COL + "\" is allowed.");

    List<String> header = new ArrayList<>(types.size() + 1);
    header.add(TEXT_COL);
    header.addAll(types);
    output.append(mapper.writeValueAsString(header));

    //generate data vectors
    for (RawDataInstance instance : data) {
        List<String> featVector = new ArrayList<>(header.size());
        featVector.add(instance.getText());
        Map<String, String> curInstAnnos = instance.getAnnotations();
        for (String type : types) {
            //Label assigned to current instance 
            if (curInstAnnos.containsKey(type)) {
                featVector.add(curInstAnnos.get(type));
            }
            //Label not assigned to current instance - handle missing value 
            else {
                if (binaryLabelTypes.contains(type)) {
                    //missing binary label interpreted as "false"
                    featVector.add(LABEL_MISSING_VAL);
                } else {
                    //missing value is interpreted as "null"
                    featVector.add(VALUE_MISSING_VAL);
                }
            }
        }
        Assert.isTrue(featVector.size() == header.size(), "Error writing feature vector. Wrong size.");
        output.append(mapper.writeValueAsString(featVector));
    }
    return output.toString();
}
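
With no schema attached, writeValueAsString serializes each List as one positional CSV row, which is how the header and each feature vector above become a line of output. A minimal sketch:

CsvMapper mapper = new CsvMapper();
String row = mapper.writeValueAsString(Arrays.asList("text value", "label_a", "label_b"));
// one CSV line, values written in list order, e.g. "text value,label_a,label_b\n"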

From source file:net.arp7.HdfsPerfTest.WriteFile.java

private static void writeCsvResult(final FileIoStats stats) {
    if (params.getResultCsvFile() == null) {
        return;
    }

    final Object[] results = new Object[] { new Date().toGMTString(), params.getNumFiles(),
            params.getNumThreads(), params.getReplication(), params.getBlockSize(), params.getIoSize(),
            stats.getFilesWritten(), stats.getBytesWritten(), stats.getMeanCreateTimeMs(),
            stats.getMeanWriteTimeMs(), stats.getMeanCloseTimeMs(), stats.getElapsedTimeMs(),
            (params.getFileSize() * 1000) / stats.getElapsedTimeMs(),
            (params.getNumFiles() * params.getFileSize() * 1000) / stats.getElapsedTimeMs(), params.getNote() };

    final CsvSchema schema = CsvSchema.builder().setColumnSeparator(';').setQuoteChar('"')
            .setUseHeader(!params.getResultCsvFile().exists())
            .addColumn("timestamp", CsvSchema.ColumnType.STRING)
            .addColumn("number of files", CsvSchema.ColumnType.NUMBER)
            .addColumn("number of threads", CsvSchema.ColumnType.NUMBER)
            .addColumn("replication factor", CsvSchema.ColumnType.NUMBER)
            .addColumn("block size", CsvSchema.ColumnType.NUMBER)
            .addColumn("io size", CsvSchema.ColumnType.NUMBER)
            .addColumn("total files written", CsvSchema.ColumnType.NUMBER)
            .addColumn("total bytes written", CsvSchema.ColumnType.NUMBER)
            .addColumn("mean time to create file in ms", CsvSchema.ColumnType.NUMBER)
            .addColumn("mean time to write file in ms", CsvSchema.ColumnType.NUMBER)
            .addColumn("mean time to close file in ms", CsvSchema.ColumnType.NUMBER)
            .addColumn("total ms", CsvSchema.ColumnType.NUMBER)
            .addColumn("mean throughput bytes per s", CsvSchema.ColumnType.NUMBER)
            .addColumn("aggregate throughput bytes per s", CsvSchema.ColumnType.NUMBER)
            .addColumn("note", CsvSchema.ColumnType.STRING).build();

    try (FileWriter fileWriter = new FileWriter(params.getResultCsvFile(), true)) {
        final CsvMapper mapper = new CsvMapper();
        final ObjectWriter writer = mapper.writer(schema);
        writer.writeValue(fileWriter, results);
    } catch (IOException e) {
        LOG.error("Could not write results to CSV file '{}': '{}'", params.getResultCsvFile().getPath(),
                e.getMessage());
    }
}
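
Because setUseHeader is tied to whether the results file already exists, the header line is written only on the first run; later runs append data rows through the FileWriter opened in append mode. A minimal sketch of the same pattern with a hypothetical two-column schema and output file:

CsvSchema schema = CsvSchema.builder()
        .setUseHeader(true)
        .addColumn("name", CsvSchema.ColumnType.STRING)
        .addColumn("count", CsvSchema.ColumnType.NUMBER)
        .build();
CsvMapper mapper = new CsvMapper();
try (FileWriter out = new FileWriter("results.csv", true)) { // hypothetical output file
    mapper.writer(schema).writeValue(out, new Object[] { "run-1", 42 });
}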