Example usage for com.fasterxml.jackson.dataformat.csv CsvSchema emptySchema

List of usage examples for com.fasterxml.jackson.dataformat.csv CsvSchema emptySchema

Introduction

On this page you can find example usage for com.fasterxml.jackson.dataformat.csv CsvSchema emptySchema.

Prototype

public static CsvSchema emptySchema() 

Source Link

Document

Accessor for creating a "default" CSV schema instance, with the following settings:
  • Does NOT use header line
  • Uses double quotes ('"') for quoting of field values (if necessary)
  • Uses comma (',') as the field separator
  • Uses Unix linefeed ('\n') as row separator
  • Does NOT use any escape characters
  • Does NOT have any columns defined
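
The snippet below is a minimal, self-contained sketch (not taken from the sources that follow) illustrating these defaults: it parses a small header-less CSV string into String[] rows using the empty schema as-is. The sample data and class name are illustrative assumptions.

import com.fasterxml.jackson.databind.MappingIterator;
import com.fasterxml.jackson.dataformat.csv.CsvMapper;
import com.fasterxml.jackson.dataformat.csv.CsvParser;
import com.fasterxml.jackson.dataformat.csv.CsvSchema;

import java.io.IOException;
import java.util.List;

public class EmptySchemaDemo {
    public static void main(String[] args) throws IOException {
        // Comma separator, '\n' row separator, no header line -- matches the defaults listed above
        String csv = "alice,30\nbob,25\n";

        CsvMapper mapper = new CsvMapper();
        // WRAP_AS_ARRAY lets each row be bound to a String[] instead of a single value
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);

        MappingIterator<String[]> it = mapper.readerFor(String[].class)
                .with(CsvSchema.emptySchema())
                .readValues(csv);

        List<String[]> rows = it.readAll();
        System.out.println(rows.size() + " rows read"); // expect 2
    }
}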

Usage

From source file:com.marklogic.entityservices.e2e.CSVLoader.java

public CSVLoader() {
    super();

    bootstrapSchema = CsvSchema.emptySchema().withHeader();
    csvMapper = new CsvMapper();
}

From source file:ed.cracken.code.SimpleTestExp1.java

public static List<Map<?, ?>> readObjectsFromCsv(String file) throws IOException {
    CsvSchema bootstrap = CsvSchema.emptySchema().withHeader();
    CsvMapper csvMapper = new CsvMapper();
    MappingIterator<Map<?, ?>> mappingIterator = csvMapper.reader(Map.class).with(bootstrap).readValues(file);

    return mappingIterator.readAll();
}

From source file:ro.fortsoft.dataset.csv.CsvDataSet.java

protected CsvSchema createCsvSchema() {
    return CsvSchema.emptySchema().withUseHeader(useHeader).withSkipFirstDataRow(skipFirstDataRow);
}

From source file:edu.cmu.cs.lti.discoursedb.io.mturk.converter.MturkConverter.java

private Iterable<Map<String, String>> csvIteratorExistingHeaders(String filename)
        throws JsonProcessingException, IOException {
    //InputStream in = new FileInputStream(filename, "UTF-8");
    InputStreamReader in = new InputStreamReader(new FileInputStream(filename), "ISO-8859-1");
    MappingIterator<Map<String, String>> iterator = new CsvMapper().readerFor(Map.class)
            .with(CsvSchema.emptySchema().withColumnSeparator(',').withHeader()).readValues(in);
    return () -> iterator;
}
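
A brief, hypothetical usage sketch for the helper above (not part of the original source); the file name and the "WorkerId" column are illustrative assumptions. Because the Iterable wraps a single MappingIterator, it can only be traversed once.

    // Hypothetical caller; "mturk_results.csv" and the "WorkerId" column are assumed, not from the source
    for (Map<String, String> row : csvIteratorExistingHeaders("mturk_results.csv")) {
        // Keys come from the header row of the CSV file
        System.out.println(row.get("WorkerId"));
    }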

From source file:datadidit.helpful.hints.processors.csv.converter.ConvertCSVToJSON.java

@OnScheduled
public void onScheduled(final ProcessContext context) throws ConfigurationException {
    //Retrieve properties from context
    Boolean header = context.getProperty(HEADER).asBoolean();
    String fieldNames = context.getProperty(FIELD_NAMES).getValue();

    /*
     * Create Schema based on properties from user.
     */
    if (!header && fieldNames != null) {
        Builder build = CsvSchema.builder();
        for (String field : fieldNames.split(",")) {
            build.addColumn(field, CsvSchema.ColumnType.NUMBER_OR_STRING);
        }
        schema = build.build();
    } else if (header && fieldNames != null && !fieldNames.equals("")) {
        schema = this.buildCsvSchema(fieldNames, header);
    } else if (!header && fieldNames == null) {
        throw new ConfigurationException("File must either contain headers or you must provide them.");
    } else {
        schema = CsvSchema.emptySchema().withHeader();
    }
}

From source file:com.couchbase.devex.CSVConfig.java

@Override
public Observable<Document> startImport() {
    FileInputStream csvFile;
    try {
        csvFile = new FileInputStream(getCsvFilePath());
        CsvMapper mapper = new CsvMapper();
        mapper.enable(CsvParser.Feature.WRAP_AS_ARRAY);
        CsvSchema csvSchema = CsvSchema.emptySchema().withColumnSeparator(getColumnSeparator())
                .withQuoteChar(getQuoteChar());
        ObjectReader reader = mapper.reader(String[].class);
        MappingIterator<String[]> it = reader.with(csvSchema).readValues(csvFile);
        if (!getSkipFirstLineForNames()) {
            String[] firstline = it.next();
            updateColumnNames(firstline);
        }
        return Observable.from(new Iterable<String[]>() {
            @Override
            public Iterator<String[]> iterator() {
                return it;
            }
        }).flatMap(line -> createNode(line));
    } catch (FileNotFoundException e) {
        return Observable.error(e);
    } catch (IOException e) {
        return Observable.error(e);
    }
}

From source file:org.jberet.support.io.JacksonCsvItemReader.java

@Override
public void open(final Serializable checkpoint) throws Exception {
    if (end == 0) {
        end = Integer.MAX_VALUE;
    }
    if (checkpoint != null) {
        start = (Integer) checkpoint;
    }
    if (start > end) {
        throw SupportMessages.MESSAGES.invalidStartPosition((Integer) checkpoint, start, end);
    }
    init();
    csvParser = (CsvParser) JsonItemReader.configureJsonParser(this, inputDecorator,
            deserializationProblemHandlers, jsonParserFeatures);

    if (csvParserFeatures != null) {
        for (final Map.Entry<String, String> e : csvParserFeatures.entrySet()) {
            final String key = e.getKey();
            final String value = e.getValue();
            final CsvParser.Feature feature;
            try {
                feature = CsvParser.Feature.valueOf(key);
            } catch (final Exception e1) {
                throw SupportMessages.MESSAGES.unrecognizedReaderWriterProperty(key, value);
            }
            if ("true".equals(value)) {
                if (!feature.enabledByDefault()) {
                    csvParser.configure(feature, true);
                }
            } else if ("false".equals(value)) {
                if (feature.enabledByDefault()) {
                    csvParser.configure(feature, false);
                }
            } else {
                throw SupportMessages.MESSAGES.invalidReaderWriterProperty(null, value, key);
            }
        }
    }

    rawAccess = beanType == List.class || beanType == String[].class;
    if (!rawAccess) {
        CsvSchema schema;
        if (columns != null) {
            schema = buildCsvSchema(null);
        } else {
            //columns not defined, but beanType is either Map.class or Pojo.class or JsonNode.class
            if (useHeader) {
                schema = buildCsvSchema(CsvSchema.emptySchema());
            } else {
                throw SupportMessages.MESSAGES.invalidReaderWriterProperty(null, columns, "columns");
            }
        }

        if (escapeChar != null) {
            schema = schema.withEscapeChar(escapeChar.charAt(0));
        }
        if (skipFirstDataRow != null) {
            schema = schema.withSkipFirstDataRow(Boolean.parseBoolean(skipFirstDataRow.trim()));
        }
        csvParser.setSchema(schema);
    }
}

From source file:com.datafibers.kafka.connect.SchemaedFileSourceTask.java

private List<SourceRecord> pollFromFile() throws InterruptedException {
    log.trace("pollFromFile");
    CsvSchema bootstrapCsv;
    CsvMapper csvMapper = new CsvMapper();
    ObjectMapper jsonMapper = new ObjectMapper();
    MappingIterator<Map<?, ?>> mappingIterator;
    ArrayList<SourceRecord> records = null;
    long currentTime = System.currentTimeMillis();
    long recordsPerPoll;

    // TODO: Improve ExceptionOnEof logic.
    // The code below only works when each pass through
    // poll() reads all available records (not a given).
    if (config.getExceptionOnEof() && streamOffset != null) {
        throw new ConnectException("No more deta available on FileInputStream");
    }

    // Initialize the bootstrapCsv schema if necessary
    if (recordSchema == null || inputType.equalsIgnoreCase("json")) {
        log.trace("Constructing csvSchema from emptySchema");
        bootstrapCsv = config.getCsvHeaders() ? CsvSchema.emptySchema().withHeader()
                : CsvSchema.emptySchema().withoutHeader();
    } else {
        // We've seen a schema, so we'll assume headers from the recordSchema
        log.trace("Constructing csvSchema from recordSchema");
        CsvSchema.Builder builder = new CsvSchema.Builder();
        builder.setUseHeader(false);
        builder.setColumnSeparator(',');
        for (Field f : recordSchema.fields()) {
            log.trace("adding column {}", f.name());
            builder.addColumn(f.name());
        }
        bootstrapCsv = builder.build();
    }
    try {
        if (stream == null)
            openFileStream();
        if (reader == null)
            reader = new BufferedReader(new InputStreamReader(stream));

        if (inputType.equalsIgnoreCase("json")) {
            mappingIterator = jsonMapper.readerFor(Map.class).readValues(reader);
        } else if (inputType.equalsIgnoreCase("csv")) {
            mappingIterator = csvMapper.readerWithSchemaFor(Map.class).with(bootstrapCsv).readValues(reader);
        } else {
            log.error("Unsupported file input type specified ({})", inputType);
            return null;
        }
    } catch (FileNotFoundException fnf) {
        log.warn("Couldn't find file {} for SchemaedFileSourceTask, sleeping to wait for it to be created",
                logFilename());
        synchronized (this) {
            this.wait(1000);
        }
        return null;
    } catch (IOException e) {
        // IOException thrown when no more records in stream
        log.warn("Processed all available data from {}; sleeping to wait additional records", logFilename());
        // Close reader and stream; swallowing exceptions ... we're about to throw a Retry
        try {
            reader.close();
        } catch (Exception nested) {
        } finally {
            reader = null;
        }

        if (stream != System.in) {
            try {
                stream.close();
            } catch (Exception nested) {
            } finally {
                stream = null;
            }
        }

        synchronized (this) {
            this.wait(1000);
        }
        return null;
    }
    log.debug("mappingIterator of type {} created; begin reading data file",
            mappingIterator.getClass().toString());

    // The csvMapper class is really screwy; can't figure out why it
    // won't return a rational Schema ... so we'll extract it from the
    // first object later.
    if (recordSchema == null && inputType.equalsIgnoreCase("csv") && csvMapper.schema().size() > 0) {
        recordSchema = ConvertMappingSchema(csvMapper.schemaWithHeader());
        log.trace("recordSchema created from csvMapper; type {}", recordSchema.type().toString());
    }
    try {
        FileInputStream fstream = (FileInputStream) stream;
        Long lastElementOffset = streamOffset;
        recordsPerPoll = 3;

        while (mappingIterator.hasNext()) {
            Map<?, ?> element = mappingIterator.next();
            Long elementOffset, iteratorOffset;
            recordCount++;
            recordsPerPoll--;

            iteratorOffset = mappingIterator.getCurrentLocation().getByteOffset(); // never works !!!
            if (iteratorOffset < 0) {
                // The stream channel will CLOSE on the last clean record
                // seen by mapping Iterator, so we have to be careful here
                // Additionally, when parsing CSV files, there seems to be a
                // lot of Bad File Descriptor errors; ignore them.
                try {
                    elementOffset = fstream.getChannel().position();
                } catch (java.nio.channels.ClosedChannelException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                } catch (IOException e) {
                    log.trace("getChannel.position threw {}", e.toString());
                    elementOffset = lastElementOffset;
                }
            } else {
                log.trace("mappingIterator.getCurrentLocation() returns {}", iteratorOffset.toString());
                elementOffset = iteratorOffset;
            }
            log.trace("Next input record: {} (class {}) from file position {}", element.toString(),
                    element.getClass().toString(), elementOffset.toString());

            if (recordSchema == null) {
                recordSchema = ConvertMappingSchema(element.keySet());
                log.trace("recordSchema created from element; type {}", recordSchema.type().toString());
            }

            if (records == null)
                records = new ArrayList<>();
            records.add(new SourceRecord(offsetKey(filename), offsetValue(elementOffset), topic, recordSchema,
                    ConvertMappingElement(recordSchema, (HashMap<?, ?>) element)));
            streamOffset = lastElementOffset = elementOffset;
        }
    } catch (Exception e) {
        throw new ConnectException(e);
    }

    lastPollTime = currentTime;
    return records;
}

From source file:org.apache.nifi.processors.ParseCSV.ParseCSV.java

public static List<Map<?, ?>> readObjectsFromCsv(InputStream is) throws IOException {
    CsvSchema bootstrap = CsvSchema.emptySchema().withHeader();
    CsvMapper csvMapper = new CsvMapper();
    MappingIterator<Map<?, ?>> mappingIterator = csvMapper.reader(Map.class).with(bootstrap).readValues(is);

    return mappingIterator.readAll();
}

From source file:com.marklogic.client.test.SPARQLManagerTest.java

private MappingIterator<Map<String, String>> parseCsv(String csv) throws JsonProcessingException, IOException {
    return new CsvMapper().reader(Map.class).with(CsvSchema.emptySchema().withHeader()) // use first row as header
            .readValues(csv);
}