Example usage for org.apache.commons.csv CSVFormat DEFAULT

List of usage examples for org.apache.commons.csv CSVFormat DEFAULT

Introduction

On this page you can find example usage for org.apache.commons.csv CSVFormat DEFAULT.

Prototype

public static final CSVFormat DEFAULT

Document

Standard comma-separated format, as for RFC 4180 but allowing empty lines.
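
Before the per-project examples, here is a minimal self-contained sketch (not taken from any of the sources below; the class name and sample data are invented for illustration) of parsing input with CSVFormat.DEFAULT and printing it back out:

import java.io.StringReader;
import java.io.StringWriter;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;

public class DefaultFormatSketch {
    public static void main(String[] args) throws Exception {
        // DEFAULT: comma delimiter, double-quote character, CRLF record separator,
        // empty lines are skipped while parsing.
        Iterable<CSVRecord> records = CSVFormat.DEFAULT.parse(new StringReader("a,b\n1,2\n"));

        StringWriter out = new StringWriter();
        CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT);
        for (CSVRecord record : records) {
            printer.printRecord(record); // echo each parsed record back out
        }
        printer.close();
        System.out.println(out); // "a,b\r\n1,2\r\n"
    }
}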

Usage

From source file:org.apache.ambari.view.hive.resources.jobs.JobService.java

/**
 * Get job results in CSV format and save them to HDFS.
 */
@GET
@Path("{jobId}/results/csv/saveToHDFS")
@Produces(MediaType.APPLICATION_JSON)
public Response getResultsToHDFS(@PathParam("jobId") String jobId, @QueryParam("commence") String commence,
        @QueryParam("file") final String targetFile, @QueryParam("stop") final String stop,
        @QueryParam("columns") final String requestedColumns, @Context HttpServletResponse response) {
    try {
        final JobController jobController = getResourceManager().readController(jobId);

        String backgroundJobId = "csv" + String.valueOf(jobController.getJob().getId());
        if (commence != null && commence.equals("true")) {
            if (targetFile == null)
                throw new MisconfigurationFormattedException("targetFile should not be empty");
            BackgroundJobController.getInstance(context).startJob(String.valueOf(backgroundJobId),
                    new Runnable() {
                        @Override
                        public void run() {

                            try {
                                Cursor resultSet = jobController.getResults();
                                resultSet.selectColumns(requestedColumns);

                                FSDataOutputStream stream = getSharedObjectsFactory().getHdfsApi()
                                        .create(targetFile, true);
                                Writer writer = new BufferedWriter(new OutputStreamWriter(stream));
                                CSVPrinter csvPrinter = new CSVPrinter(writer, CSVFormat.DEFAULT);
                                try {
                                    while (resultSet.hasNext() && !Thread.currentThread().isInterrupted()) {
                                        csvPrinter.printRecord(resultSet.next().getRow());
                                        writer.flush();
                                    }
                                } finally {
                                    writer.close();
                                }
                                stream.close();

                            } catch (IOException e) {
                                throw new ServiceFormattedException(
                                        "Could not write CSV to HDFS for job#" + jobController.getJob().getId(),
                                        e);
                            } catch (InterruptedException e) {
                                throw new ServiceFormattedException(
                                        "Could not write CSV to HDFS for job#" + jobController.getJob().getId(),
                                        e);
                            } catch (ItemNotFound itemNotFound) {
                                throw new NotFoundFormattedException("Job results are expired", itemNotFound);
                            }

                        }
                    });
        }

        if (stop != null && stop.equals("true")) {
            BackgroundJobController.getInstance(context).interrupt(backgroundJobId);
        }

        JSONObject object = new JSONObject();
        object.put("stopped", BackgroundJobController.getInstance(context).isInterrupted(backgroundJobId));
        object.put("jobId", jobController.getJob().getId());
        object.put("backgroundJobId", backgroundJobId);
        object.put("operationType", "CSV2HDFS");
        object.put("status", BackgroundJobController.getInstance(context).state(backgroundJobId).toString());

        return Response.ok(object).build();
    } catch (WebApplicationException ex) {
        throw ex;
    } catch (ItemNotFound itemNotFound) {
        throw new NotFoundFormattedException(itemNotFound.getMessage(), itemNotFound);
    } catch (Exception ex) {
        throw new ServiceFormattedException(ex.getMessage(), ex);
    }
}

From source file:org.apache.batchee.csv.CSVFormatFactory.java

static CSVFormat newFormat(final String format, final String delimiter, final String quoteCharacter,
        final String quoteMode, final String commentMarker, final String escapeCharacter,
        final String ignoreSurroundingSpaces, final String ignoreEmptyLines, final String recordSeparator,
        final String nullString, final String headerComments, final String header,
        final String skipHeaderRecord, final String allowMissingColumnNames, final String readHeaders) {
    //CHECKSTYLE:ON
    CSVFormat out = format == null ? CSVFormat.DEFAULT : CSVFormat.valueOf(format);
    if (delimiter != null) {
        out = out.withDelimiter(delimiter.charAt(0));
    }
    if (quoteCharacter != null) {
        out = out.withQuote(quoteCharacter.charAt(0));
    }
    if (quoteMode != null) {
        out = out.withQuoteMode(QuoteMode.valueOf(quoteMode));
    }
    if (commentMarker != null) {
        out = out.withCommentMarker(commentMarker.charAt(0));
    }
    if (escapeCharacter != null) {
        out = out.withEscape(escapeCharacter.charAt(0));
    }
    if (ignoreSurroundingSpaces != null) {
        out = out.withIgnoreSurroundingSpaces(Boolean.parseBoolean(ignoreSurroundingSpaces));
    }
    if (ignoreEmptyLines != null) {
        out = out.withIgnoreEmptyLines(Boolean.parseBoolean(ignoreEmptyLines));
    }
    if (recordSeparator != null) {
        if ("\\n".equals(recordSeparator)) {
            out = out.withRecordSeparator('\n');
        } else if ("\\r\\n".equals(recordSeparator)) {
            out = out.withRecordSeparator("\r\n");
        } else {
            out = out.withRecordSeparator(recordSeparator);
        }
    }
    if (nullString != null) {
        out = out.withNullString(nullString);
    }
    if (headerComments != null && !headerComments.trim().isEmpty()) {
        out = out.withHeaderComments(headerComments.split(" *, *"));
    }
    if (Boolean.parseBoolean(readHeaders)) {
        out = out.withHeader();
    }
    if (header != null && !header.trim().isEmpty()) {
        try { // the header value may itself be CSV-encoded, so parse it as a record
            final Iterator<CSVRecord> iterator = out.withHeader(new String[0])
                    .parse(new StringReader(header + '\n' + header)).iterator();
            final CSVRecord record = iterator.next();
            final List<String> list = new ArrayList<String>(record.size());
            for (final String h : record) {
                list.add(h);
            }
            out = out.withHeader(list.toArray(new String[record.size()]));
        } catch (final IOException e) { // cannot actually happen when reading from a StringReader
            out = out.withHeader(header.split(" *, *"));
        }
    }
    if (skipHeaderRecord != null) {
        out = out.withSkipHeaderRecord(Boolean.parseBoolean(skipHeaderRecord));
    }
    if (allowMissingColumnNames != null) {
        out = out.withAllowMissingColumnNames(Boolean.parseBoolean(allowMissingColumnNames));
    }
    return out;
}
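
The factory above simply chains the immutable with* methods onto a base format. As a rough illustration of the same pattern applied directly (the delimiter, quote, null string, and header names below are invented for the example):

// Each with* call returns a new CSVFormat; DEFAULT itself is never modified.
CSVFormat format = CSVFormat.DEFAULT
        .withDelimiter(';')
        .withQuote('\'')
        .withIgnoreEmptyLines(true)
        .withNullString("NULL")
        .withHeader("id", "name")
        .withSkipHeaderRecord(true);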

From source file:org.apache.beam.sdk.extensions.sql.impl.schema.kafka.BeamKafkaCSVTable.java

public BeamKafkaCSVTable(BeamRecordSqlType beamSqlRowType, String bootstrapServers, List<String> topics) {
    this(beamSqlRowType, bootstrapServers, topics, CSVFormat.DEFAULT);
}

From source file:org.apache.beam.sdk.extensions.sql.impl.schema.kafka.BeamKafkaCSVTableTest.java

@Test
public void testCsvRecorderDecoder() throws Exception {
    PCollection<BeamRecord> result = pipeline.apply(Create.of("1,\"1\",1.0", "2,2,2.0"))
            .apply(ParDo.of(new String2KvBytes()))
            .apply(new BeamKafkaCSVTable.CsvRecorderDecoder(genRowType(), CSVFormat.DEFAULT));

    PAssert.that(result).containsInAnyOrder(row1, row2);

    pipeline.run();
}

From source file:org.apache.beam.sdk.extensions.sql.impl.schema.kafka.BeamKafkaCSVTableTest.java

@Test
public void testCsvRecorderEncoder() throws Exception {
    PCollection<BeamRecord> result = pipeline.apply(Create.of(row1, row2))
            .apply(new BeamKafkaCSVTable.CsvRecorderEncoder(genRowType(), CSVFormat.DEFAULT))
            .apply(new BeamKafkaCSVTable.CsvRecorderDecoder(genRowType(), CSVFormat.DEFAULT));

    PAssert.that(result).containsInAnyOrder(row1, row2);

    pipeline.run();
}

From source file:org.apache.beam.sdk.extensions.sql.impl.schema.text.BeamTextCSVTable.java

/**
 * CSV table with {@link CSVFormat#DEFAULT DEFAULT} format.
 */
public BeamTextCSVTable(BeamRecordSqlType beamSqlRowType, String filePattern) {
    this(beamSqlRowType, filePattern, CSVFormat.DEFAULT);
}

From source file:org.apache.beam.sdk.extensions.sql.impl.schema.text.BeamTextCSVTableTest.java

/**
 * Helper that writes the given lines (adding a newline in between) to a stream, then closes the
 * stream.
 */
private static void writeToStreamAndClose(List<Object[]> rows, OutputStream outputStream) {
    try (PrintStream writer = new PrintStream(outputStream)) {
        CSVPrinter printer = CSVFormat.DEFAULT.print(writer);
        for (Object[] row : rows) {
            for (Object field : row) {
                printer.print(field);
            }
            printer.println();
        }
    } catch (IOException e) {
        e.printStackTrace();
    }
}
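
A possible call site for the helper above (the row values and output stream are invented for illustration, and java.io.ByteArrayOutputStream and java.util.Arrays are assumed to be imported in the test class); with CSVFormat.DEFAULT each record ends in CRLF:

// Hypothetical usage of writeToStreamAndClose from the same class.
ByteArrayOutputStream out = new ByteArrayOutputStream();
writeToStreamAndClose(
        Arrays.asList(new Object[] { 1, "first", 1.0 },
                new Object[] { 2, "second", 2.0 }),
        out);
// out now contains: 1,first,1.0\r\n2,second,2.0\r\n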

From source file:org.apache.beam.sdk.extensions.sql.meta.provider.kafka.BeamKafkaCSVTable.java

public BeamKafkaCSVTable(Schema beamSchema, String bootstrapServers, List<String> topics) {
    this(beamSchema, bootstrapServers, topics, CSVFormat.DEFAULT);
}

From source file:org.apache.beam.sdk.extensions.sql.meta.provider.kafka.BeamKafkaCSVTableTest.java

@Test
public void testCsvRecorderDecoder() throws Exception {
    PCollection<Row> result = pipeline.apply(Create.of("1,\"1\",1.0", "2,2,2.0"))
            .apply(ParDo.of(new String2KvBytes()))
            .apply(new BeamKafkaCSVTable.CsvRecorderDecoder(genSchema(), CSVFormat.DEFAULT));

    PAssert.that(result).containsInAnyOrder(ROW1, ROW2);

    pipeline.run();
}

From source file:org.apache.beam.sdk.extensions.sql.meta.provider.kafka.BeamKafkaCSVTableTest.java

@Test
public void testCsvRecorderEncoder() throws Exception {
    PCollection<Row> result = pipeline.apply(Create.of(ROW1, ROW2))
            .apply(new BeamKafkaCSVTable.CsvRecorderEncoder(genSchema(), CSVFormat.DEFAULT))
            .apply(new BeamKafkaCSVTable.CsvRecorderDecoder(genSchema(), CSVFormat.DEFAULT));

    PAssert.that(result).containsInAnyOrder(ROW1, ROW2);

    pipeline.run();
}