Example usage for org.apache.commons.csv CSVFormat TDF

List of usage examples for org.apache.commons.csv CSVFormat TDF

Introduction

On this page you can find example usage for org.apache.commons.csv CSVFormat TDF.

Prototype

CSVFormat TDF

Document

Tab-delimited format.
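
A minimal, self-contained sketch (not taken from the examples below) of what CSVFormat.TDF does: parse tab-separated text and print it back with tab delimiters. It assumes Commons CSV 1.3 or later for withFirstRecordAsHeader().

import java.io.IOException;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;

public class TdfQuickStart {
    public static void main(String[] args) throws IOException {
        String tsv = "id\tname\n1\tAlice\n2\tBob\n";

        // Parse tab-delimited input, using the first record as the header.
        try (CSVParser parser = CSVParser.parse(tsv, CSVFormat.TDF.withFirstRecordAsHeader())) {
            for (CSVRecord record : parser) {
                System.out.println(record.get("id") + " -> " + record.get("name"));
            }
        }

        // Write tab-delimited output.
        StringWriter out = new StringWriter();
        try (CSVPrinter printer = new CSVPrinter(out, CSVFormat.TDF)) {
            printer.printRecord("id", "name");
            printer.printRecord(1, "Alice");
        }
        System.out.print(out);
    }
}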

Usage

From source file:edu.caltech.ipac.firefly.server.util.DsvToDataGroup.java

public static void main(String[] args) {

    try {
        File inf = new File(args[0]);
        DataGroup dg = parse(inf, CSVFormat.DEFAULT);
        IpacTableWriter.save(System.out, dg);
        write(new File(inf.getAbsolutePath() + ".csv"), dg);
        write(new File(inf.getAbsolutePath() + ".tsv"), dg, CSVFormat.TDF);
    } catch (Exception e) {
        e.printStackTrace();
    }

}
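
The parse, write and IpacTableWriter helpers used above live elsewhere in the edu.caltech.ipac.firefly codebase and are not shown. As a rough, hypothetical sketch only (plain List<String[]> rows instead of Firefly's DataGroup), a TSV write helper built on Commons CSV could look like this:

import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.List;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class TsvWriteHelper {
    // Hypothetical helper, not the Firefly implementation: writes plain rows
    // with the supplied format, e.g. CSVFormat.TDF for a .tsv file.
    public static void write(File outFile, List<String[]> rows, CSVFormat format) throws IOException {
        try (Writer writer = new BufferedWriter(new FileWriter(outFile));
                CSVPrinter printer = new CSVPrinter(writer, format)) {
            for (String[] row : rows) {
                printer.printRecord((Object[]) row);
            }
        }
    }
}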

From source file:com.lithium.flow.util.CsvFormats.java

@Nonnull
public static CSVFormat fromConfig(@Nonnull Config config) {
    checkNotNull(config);
    switch (config.getString("csv.format", "default")) {
    case "default":
        return CSVFormat.DEFAULT;
    case "excel":
        return CSVFormat.EXCEL;
    case "mysql":
        return CSVFormat.MYSQL;
    case "rfc4180":
        return CSVFormat.RFC4180;
    case "tdf":
        return CSVFormat.TDF;
    case "custom":
        return CSVFormat.newFormat(getChar(config, "csv.delimiter", ','))
                .withAllowMissingColumnNames(getBoolean(config, "csv.allowMissingColumnNames"))
                .withCommentMarker(getChar(config, "csv.commentMarker"))
                .withEscape(getChar(config, "csv.escape")).withHeader(getHeader(config, "csv.header"))
                .withIgnoreEmptyLines(getBoolean(config, "csv.ignoreEmptyLines"))
                .withIgnoreSurroundingSpaces(getBoolean(config, "csv.ignoreSurroundingSpaces"))
                .withNullString(getString(config, "csv.nullString")).withQuote(getChar(config, "csv.quote"))
                .withQuoteMode(getQuoteMode(config, "csv.quoteMode"))
                .withRecordSeparator(getString(config, "csv.recordSeparator"))
                .withSkipHeaderRecord(getBoolean(config, "csv.skipHeaderRecord"));
    default:
        return CSVFormat.DEFAULT;
    }
}
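
The Config type and the getChar, getBoolean, getString, getHeader and getQuoteMode helpers above belong to the surrounding com.lithium.flow codebase and are not shown here. Stripped of that plumbing, the "custom" branch is just the standard Commons CSV builder chain; a self-contained sketch with literal values standing in for the config lookups:

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.QuoteMode;

public class CustomFormatSketch {
    public static void main(String[] args) {
        // Same builder chain as the "custom" case, with hard-coded sample values.
        CSVFormat custom = CSVFormat.newFormat('\t')
                .withAllowMissingColumnNames(true)
                .withCommentMarker('#')
                .withEscape('\\')
                .withHeader("id", "name")
                .withIgnoreEmptyLines(true)
                .withIgnoreSurroundingSpaces(true)
                .withNullString("")
                .withQuote('"')
                .withQuoteMode(QuoteMode.MINIMAL)
                .withRecordSeparator("\r\n")
                .withSkipHeaderRecord(true);
        System.out.println(custom);
    }
}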

From source file:moacscoper.Parser.java

private List<CSVRecord> parse(String path) throws IOException {
    return CSVParser.parse(new File(path), StandardCharsets.UTF_8, CSVFormat.TDF).getRecords();
}
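
CSVParser.parse(File, Charset, CSVFormat) opens the file and getRecords() reads it eagerly; note that the one-liner above never closes the parser. A short sketch, assuming a small tab-separated file named data.tsv, that does the same thing with try-with-resources:

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class TdfRecords {
    public static void main(String[] args) throws IOException {
        // data.tsv is a placeholder path; adjust as needed.
        try (CSVParser parser = CSVParser.parse(new File("data.tsv"), StandardCharsets.UTF_8, CSVFormat.TDF)) {
            for (CSVRecord record : parser.getRecords()) {
                System.out.println(record.get(0)); // first column of each row
            }
        }
    }
}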

From source file:io.scigraph.services.jersey.writers.TsvWriter.java

@Override
CSVPrinter getCsvPrinter(Writer writer) throws IOException {
    return new CSVPrinter(writer, CSVFormat.TDF);
}
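
A printer built this way simply joins each record's values with tabs. A minimal sketch of driving such a CSVPrinter, with the surrounding io.scigraph writer machinery left out:

import java.io.IOException;
import java.io.PrintWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class TdfPrint {
    public static void main(String[] args) throws IOException {
        try (CSVPrinter printer = new CSVPrinter(new PrintWriter(System.out), CSVFormat.TDF)) {
            printer.printRecord("id", "label");
            printer.printRecord("HP:0000001", "All");
        }
    }
}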

From source file:com.compomics.cell_coord.parser.impl.TSVFileParser.java

@Override
public Sample parseTrackFile(File trackFile) throws FileParserException {
    // create a new sample object -- watch out to set the relationships!
    Sample sample = new Sample(trackFile.getName());
    // initialize an empty list of tracks
    List<Track> list = new ArrayList<>();
    CSVParser tsvFileParser;
    FileReader fileReader;
    CSVFormat csvFileFormat = CSVFormat.TDF.withHeader(FILE_HEADER_MAPPING);
    try {
        // initialize the file reader
        fileReader = new FileReader(trackFile);
        //initialize CSVParser object
        tsvFileParser = new CSVParser(fileReader, csvFileFormat);
        // get the csv records
        List<CSVRecord> csvRecords = tsvFileParser.getRecords();
        Track currentTrack = null;
        List<TrackSpot> currentTrackPointList = new ArrayList<>();
        Long currentId = 0L;

        //Read the CSV file records starting from the second record to skip the header
        for (int i = 1; i < csvRecords.size(); i++) {
            CSVRecord cSVRecord = csvRecords.get(i);
            // get the fields
            Long trackid = Long.parseLong(cSVRecord.get(TRACK_ID));
            if (!Objects.equals(currentId, trackid)) {
                currentTrack = new Track();
                currentTrack.setTrackid(trackid);
                list.add(currentTrack);
                currentId = trackid;
                currentTrackPointList = new ArrayList<>();
            }
            // create new Track Spot object
            Long spotid = Long.parseLong(cSVRecord.get(SPOT_ID));
            double x = Double.parseDouble(cSVRecord.get(X_COORD));
            double y = Double.parseDouble(cSVRecord.get(Y_COORD));
            double time = Double.parseDouble(cSVRecord.get(TIME));
            TrackSpot trackSpot = new TrackSpot(spotid, x, y, time, currentTrack);
            currentTrackPointList.add(trackSpot);
            currentTrack.setTrackSpots(currentTrackPointList);
            currentTrack.setSample(sample);
        }
    } catch (IOException ex) {
        LOG.error(ex.getMessage(), ex);
    } catch (NumberFormatException ex) {
        LOG.error(ex.getMessage(), ex);
        throw new FileParserException(
                "It seems like a line does not contain a number!\nPlease check your files!");
    }
    sample.setTracks(list);
    return sample;
}
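
The loop above starts at index 1 because withHeader(FILE_HEADER_MAPPING) supplies the column names explicitly, so the file's own header line still comes back as the first record. A possible alternative (a sketch reusing the same FILE_HEADER_MAPPING constant) is to let the format skip that record itself, which removes the manual offset:

// Sketch: have the parser skip the header line instead of skipping index 0 by hand.
CSVFormat csvFileFormat = CSVFormat.TDF
        .withHeader(FILE_HEADER_MAPPING)
        .withSkipHeaderRecord(true);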

From source file:com.bigtester.ate.tcg.controller.TrainingFileDB.java

/**
 * Gets the CSV format.
 *
 * @return the CSV format
 * @throws IOException
 */
public static CSVFormat getCSVFormat() throws IOException {
    // Create the CSVFormat object with "\n" as a record delimiter
    CSVFormat csvFileFormat = CSVFormat.TDF // NOPMD
            .withRecordSeparator(NEW_LINE_SEPARATOR);
    csvFileFormat = csvFileFormat.withEscape('^');
    csvFileFormat = csvFileFormat.withQuoteMode(QuoteMode.NONE);
    if (null == csvFileFormat)
        throw new IOException();
    return csvFileFormat;
}
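
With QuoteMode.NONE the printer never quotes values, so the escape character set here ('^') is what protects embedded delimiters. A short sketch of writing one record with an equivalent format, assuming NEW_LINE_SEPARATOR is "\n" as the comment above suggests:

import java.io.IOException;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.QuoteMode;

public class NoQuoteTdf {
    public static void main(String[] args) throws IOException {
        CSVFormat format = CSVFormat.TDF
                .withRecordSeparator("\n")
                .withEscape('^')
                .withQuoteMode(QuoteMode.NONE);

        StringWriter out = new StringWriter();
        try (CSVPrinter printer = new CSVPrinter(out, format)) {
            // The embedded tab (the delimiter) is escaped with '^' rather than quoted.
            printer.printRecord("label", "text with a\ttab");
        }
        System.out.print(out);
    }
}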

From source file:co.cask.hydrator.transforms.ParseCSV.java

@Override
public void initialize(TransformContext context) throws Exception {
    super.initialize(context);

    String csvFormatString = config.format.toLowerCase();
    switch (csvFormatString) {
    case "default":
        csvFormat = CSVFormat.DEFAULT;
        break;

    case "excel":
        csvFormat = CSVFormat.EXCEL;
        break;

    case "mysql":
        csvFormat = CSVFormat.MYSQL;
        break;

    case "rfc4180":
        csvFormat = CSVFormat.RFC4180;
        break;

    case "tdf":
        csvFormat = CSVFormat.TDF;
        break;

    default:
        throw new IllegalArgumentException(
                "Format '" + csvFormatString + "' specified is not one of the allowed formats. "
                        + "Allowed formats are DEFAULT, EXCEL, MYSQL, RFC4180 and TDF.");
    }

    try {
        outSchema = Schema.parseJson(config.schema);
        fields = outSchema.getFields();
    } catch (IOException e) {
        throw new IllegalArgumentException("Format of schema specified is invalid. Please check the format.");
    }
}
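
The chosen csvFormat is presumably applied to individual field values elsewhere in the transform. As a standalone sketch (plain Commons CSV only, no Hydrator types), parsing a single delimited line with the selected format looks like this:

import java.io.IOException;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class ParseOneLine {
    public static void main(String[] args) throws IOException {
        CSVFormat csvFormat = CSVFormat.TDF; // e.g. the "tdf" case above

        try (CSVParser parser = CSVParser.parse("a\tb\tc", csvFormat)) {
            for (CSVRecord record : parser) {
                System.out.println(record.size() + " fields, first = " + record.get(0));
            }
        }
    }
}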

From source file:co.cask.hydrator.transforms.CSVParser2.java

@Override
public void initialize(TransformContext context) throws Exception {
    super.initialize(context);

    String csvFormatString = config.format.toLowerCase();
    switch (csvFormatString) {
    case "default":
        csvFormat = CSVFormat.DEFAULT;
        break;

    case "excel":
        csvFormat = CSVFormat.EXCEL;
        break;

    case "mysql":
        csvFormat = CSVFormat.MYSQL;
        break;

    case "rfc4180":
        csvFormat = CSVFormat.RFC4180;
        break;

    case "tdf":
        csvFormat = CSVFormat.TDF;
        break;

    default:
        throw new IllegalArgumentException(
                "Format '" + csvFormatString + "' specified is not one of the allowed formats. "
                        + "Allowed formats are DEFAULT, EXCEL, MYSQL, RFC4180 and TDF.");
    }

    if (config.field == null || config.field.isEmpty()) {
        throw new IllegalArgumentException("Field for applying transformation is not specified.");
    }

    try {
        outSchema = Schema.parseJson(config.schema);
        fields = outSchema.getFields();
    } catch (IOException e) {
        throw new IllegalArgumentException("Format of schema specified is invalid. Please check the format.");
    }
}

From source file:com.ibm.watson.developer_cloud.professor_languo.pipeline.evaluation.ResultWriter.java

@Override
public void initialize(Properties properties) {
    // Retrieve necessary properties
    String resultsFilePath = properties.getProperty(ConfigurationConstants.PIPELINE_RESULTS_TSV_FILE_PATH);
    this.format = PipelineResultsTsvFileFormats
            .valueOf(properties.getProperty(ConfigurationConstants.PIPELINE_RESULTS_TSV_FILE_FORMAT,
                    PipelineResultsTsvFileFormats.DEFAULT.toString()));

    // Make sure file path was actually specified
    if (resultsFilePath == null)
        throw new RuntimeException(MessageFormat.format(Messages.getString("RetrieveAndRank.MISSING_PROPERTY"), //$NON-NLS-1$
                ConfigurationConstants.PIPELINE_RESULTS_TSV_FILE_PATH));

    // Open a FileWriter, using CSV or TSV format depending on desired
    // output format
    try {
        writer = (this.format == PipelineResultsTsvFileFormats.COMPETITION)
                ? new CSVPrinter(new FileWriter(resultsFilePath), CSVFormat.DEFAULT)
                : new CSVPrinter(new FileWriter(resultsFilePath),
                        CSVFormat.TDF.withHeader(getHeaders(this.format)));
    } catch (IOException e) {
        throw new RuntimeException(new PipelineException(e));
    }
}
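
When a CSVPrinter is constructed with a format that carries a header, as in the TDF branch above, and skipHeaderRecord has not been set, the header row is written immediately. A small sketch of that behavior with made-up column names:

import java.io.IOException;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class HeaderOnConstruct {
    public static void main(String[] args) throws IOException {
        StringWriter out = new StringWriter();
        // The header record is printed as soon as the printer is created.
        try (CSVPrinter printer = new CSVPrinter(out,
                CSVFormat.TDF.withHeader("question", "rank", "score"))) {
            printer.printRecord("q1", 1, 0.87);
        }
        System.out.print(out);
    }
}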

From source file:com.linkedin.pinot.core.data.readers.CSVRecordReader.java

private CSVFormat getFormatFromConfig() {
    String format = (_config != null) ? _config.getCsvFileFormat() : null;

    if (format == null) {
        return CSVFormat.DEFAULT;
    }

    format = format.toUpperCase();
    if ((format.equals("DEFAULT"))) {
        return CSVFormat.DEFAULT;

    } else if (format.equals("EXCEL")) {
        return CSVFormat.EXCEL;

    } else if (format.equals("MYSQL")) {
        return CSVFormat.MYSQL;

    } else if (format.equals("RFC4180")) {
        return CSVFormat.RFC4180;

    } else if (format.equals("TDF")) {
        return CSVFormat.TDF;
    } else {
        return CSVFormat.DEFAULT;
    }
}
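
If a newer Commons CSV release is available (1.2 or later), the CSVFormat.Predefined enum and CSVFormat.valueOf(String) can replace hand-written switches like this one; note the predefined names are case-sensitive ("Default", "Excel", "MySQL", "RFC4180", "TDF"). A brief sketch:

import org.apache.commons.csv.CSVFormat;

public class LookupByName {
    public static void main(String[] args) {
        // Resolves a predefined format by its (case-sensitive) name.
        CSVFormat tdf = CSVFormat.valueOf("TDF");
        System.out.println(tdf.getDelimiter() == '\t'); // true
    }
}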