Example usage for org.apache.commons.csv CSVFormat DEFAULT

List of usage examples for org.apache.commons.csv CSVFormat DEFAULT

Introduction

On this page you can find example usage for org.apache.commons.csv CSVFormat DEFAULT.

Prototype

public static final CSVFormat DEFAULT

Document

Standard Comma Separated Value format, as for RFC 4180 but allowing empty lines.
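
For orientation, a minimal stand-alone sketch of parsing with this format (the sample data is made up):

import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class DefaultParseExample {
    public static void main(String[] args) throws Exception {
        try (CSVParser parser = CSVFormat.DEFAULT.parse(new StringReader("a,b,c\n1,2,3\n"))) {
            for (CSVRecord record : parser) {
                // With DEFAULT, values are accessed by 0-based index.
                System.out.println(record.get(0) + " | " + record.get(2));
            }
        }
    }
}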

Usage

From source file: org.logstash.dependencies.ReportGenerator.java

public boolean generateReport(InputStream licenseMappingStream, InputStream acceptableLicensesStream,
        InputStream rubyDependenciesStream, InputStream[] javaDependenciesStreams, Writer output)
        throws IOException {

    SortedSet<Dependency> dependencies = new TreeSet<>();
    Dependency.addDependenciesFromRubyReport(rubyDependenciesStream, dependencies);

    for (InputStream stream : javaDependenciesStreams) {
        Dependency.addDependenciesFromJavaReport(stream, dependencies);
    }

    Map<String, LicenseUrlPair> licenseMapping = new HashMap<>();
    readLicenseMapping(licenseMappingStream, licenseMapping);
    List<String> acceptableLicenses = new ArrayList<>();
    readAcceptableLicenses(acceptableLicensesStream, acceptableLicenses);
    for (Dependency dependency : dependencies) {
        String nameAndVersion = dependency.name + ":" + dependency.version;
        if (licenseMapping.containsKey(nameAndVersion)) {
            LicenseUrlPair pair = licenseMapping.get(nameAndVersion);

            if (pair.url != null && !pair.url.equals("")
                    && (acceptableLicenses.stream().anyMatch(pair.license::equalsIgnoreCase))) {
                dependency.spdxLicense = pair.license;
                dependency.url = pair.url;
            } else {
                // unacceptable license or missing URL
                UNKNOWN_LICENSES.add(dependency);
            }
        } else {
            dependency.spdxLicense = UNKNOWN_LICENSE;
            UNKNOWN_LICENSES.add(dependency);
        }
    }

    try (CSVPrinter csvPrinter = new CSVPrinter(output, CSVFormat.DEFAULT.withHeader(CSV_HEADERS))) {
        for (Dependency dependency : dependencies) {
            csvPrinter.printRecord(dependency.toCsvReportRecord());
        }
        csvPrinter.flush();
    }

    String msg = "Generated report with %d dependencies (%d unknown or unacceptable licenses).";
    System.out.println(String.format(msg + "\n", dependencies.size(), UNKNOWN_LICENSES.size()));

    if (UNKNOWN_LICENSES.size() > 0) {
        String errMsg = "Add complying licenses (using the SPDX license ID from https://spdx.org/licenses) "
                + "with URLs for the libraries listed below to tools/dependencies-report/src/main/resources/"
                + "licenseMapping.csv:";
        System.out.println(errMsg);
        for (Dependency dependency : UNKNOWN_LICENSES) {
            System.out.println(String.format("\"%s:%s\"", dependency.name, dependency.version));
        }
    }

    return UNKNOWN_LICENSES.size() == 0;
}

From source file: org.logstash.dependencies.ReportGenerator.java

private void readAcceptableLicenses(InputStream stream, List<String> acceptableLicenses) throws IOException {
    Reader in = new InputStreamReader(stream);
    for (CSVRecord record : CSVFormat.DEFAULT.parse(in)) {
        String acceptableLicenseId = record.get(0);
        if (acceptableLicenseId != null && !acceptableLicenseId.equals("")) {
            acceptableLicenses.add(acceptableLicenseId);
        }
    }
}

From source file: org.logstash.dependencies.ReportGenerator.java

private void readLicenseMapping(InputStream stream, Map<String, LicenseUrlPair> licenseMapping)
        throws IOException {
    Reader in = new InputStreamReader(stream);
    for (CSVRecord record : CSVFormat.DEFAULT.withFirstRecordAsHeader().parse(in)) {
        String dependencyNameAndVersion = record.get(0);
        if (dependencyNameAndVersion != null && !dependencyNameAndVersion.equals("")) {
            licenseMapping.put(dependencyNameAndVersion, new LicenseUrlPair(record.get(2), record.get(1)));
        }
    }
}

From source file: org.mariotaku.twidere.util.StatisticUtils.java

public static void writeStatusOpen(ParcelableStatus status, Location location, int signalStrength)
        throws IOException {
    final LogWriter writer = new LogWriter("Twidere");
    final CSVPrinter printer = CSVFormat.DEFAULT.print(writer);
    printer.printRecord(status.account_id, status.id, status.user_id, status.user_screen_name, status.text_html,
            fromStringLocation(location), signalStrength);
}
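
Note that this snippet never flushes or closes the printer, so a buffered record may never reach the log. A more defensive variant, assuming LogWriter extends java.io.Writer (it is an application class, not part of Commons CSV), would be:

try (LogWriter writer = new LogWriter("Twidere");
        CSVPrinter printer = CSVFormat.DEFAULT.print(writer)) {
    printer.printRecord(status.account_id, status.id, status.user_id,
            status.user_screen_name, status.text_html,
            fromStringLocation(location), signalStrength);
} // closing the writer flushes any buffered output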

From source file: org.mercycorps.translationcards.txcmaker.GetTxcServlet.java

private void produceTxcJson(Drive drive, HttpServletRequest req, HttpServletResponse resp) throws IOException {
    TxcPortingUtility.ExportSpec exportSpec = new TxcPortingUtility.ExportSpec()
            .setDeckLabel(req.getParameter("deckName")).setPublisher(req.getParameter("publisher"))
            .setDeckId(req.getParameter("deckId")).setLicenseUrl(req.getParameter("licenseUrl"))
            .setLocked(req.getParameter("locked") != null);
    String spreadsheetFileId = req.getParameter("docId");
    Drive.Files.Export sheetExport = drive.files().export(spreadsheetFileId, CSV_EXPORT_TYPE);
    Reader reader = new InputStreamReader(sheetExport.executeMediaAsInputStream());
    CSVParser parser = new CSVParser(reader, CSVFormat.DEFAULT.withHeader());
    try {
        for (CSVRecord row : parser) {
            String language = row.get(SRC_HEADER_LANGUAGE);
            TxcPortingUtility.CardSpec card = new TxcPortingUtility.CardSpec()
                    .setLabel(row.get(SRC_HEADER_LABEL)).setFilename(row.get(SRC_HEADER_FILENAME))
                    .setTranslationText(row.get(SRC_HEADER_TRANSLATION_TEXT));
            exportSpec.addCard(language, card);
        }
    } finally {
        parser.close();
        reader.close();
    }
    resp.getWriter().println(TxcPortingUtility.buildTxcJson(exportSpec));
}
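
Here withHeader() is called with no arguments, which tells the parser to take column names from the first record; that is what makes the row.get(SRC_HEADER_...) lookups by name possible (withFirstRecordAsHeader() in the readLicenseMapping example above is essentially the more explicit spelling of the same setting). A minimal stand-alone sketch with made-up data:

import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class NamedColumnExample {
    public static void main(String[] args) throws Exception {
        String csv = "language,label,filename\nes,hola,hola.mp3\n";
        try (CSVParser parser = new CSVParser(new StringReader(csv),
                CSVFormat.DEFAULT.withHeader())) {
            for (CSVRecord row : parser) {
                // Values can be fetched by column name instead of index.
                System.out.println(row.get("language") + ": " + row.get("label"));
            }
        }
    }
}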

From source file: org.mitre.ptmatchadapter.fril.RecordMatchResultsBuilder.java

/**
 *
 * @param bundle
 *          Bundle to which an entry for each linked record will be added
 * @throws IOException
 *           thrown when the file containing the linked results is not found
 *           or could not be processed
 */
private void addLinkedRecordEntries(Bundle bundle) throws IOException {
    if (duplicatesFile != null) {
        final Reader in = new FileReader(duplicatesFile);
        try {
            final Iterable<CSVRecord> records = CSVFormat.DEFAULT.parse(in);

            String refRecordUrl = null;
            String curDupId = "0";

            // see https://www.hl7.org/fhir/valueset-patient-mpi-match.html
            final CodeType certain = new CodeType("certain");
            final CodeType probable = new CodeType("probable");
            final CodeType possible = new CodeType("possible");
            final CodeType certainlyNot = new CodeType("certainly-not");

            for (CSVRecord record : records) {
                String duplicateId = record.get(DUPLICATE_ID_COL);
                String scoreStr = record.get(SCORE_COL);
                String fullUrl = record.get(FULL_URL_COL);

                if (curDupId.equals(duplicateId)) {
                    if (refRecordUrl == null) {
                        LOG.warn("Unexpected condition, curDupId {}, duplicateId {}", curDupId, duplicateId);
                        continue;
                    }

                    BundleEntryComponent entry = new BundleEntryComponent();
                    entry.setFullUrl(refRecordUrl);

                    BundleEntrySearchComponent search = new BundleEntrySearchComponent();
                    // fril returns results 0 - 100; normalize to 0 - 1;
                    double score = Double.valueOf(scoreStr).doubleValue() / 100.;
                    search.setScoreElement(new DecimalType(score));

                    // TODO Add Extension that maps score value to a term (e.g.,
                    // probable)
                    Extension searchExt = new Extension(
                            new UriType("http://hl7.org/fhir/StructureDefinition/patient-mpi-match"));
                    if (score > 0.85) {
                        searchExt.setValue(certain);
                    } else if (score > 0.65) {
                        searchExt.setValue(probable);
                    } else if (score > .45) {
                        searchExt.setValue(possible);
                    } else {
                        searchExt.setValue(certainlyNot);
                    }
                    search.addExtension(searchExt);
                    entry.setSearch(search);

                    // Add information about the resource type

                    BundleLinkComponent link = new BundleLinkComponent(new StringType("type"),
                            new UriType("http://hl7.org/fhir/Patient"));
                    entry.addLink(link);

                    // Add the link to the duplicate record
                    link = new BundleLinkComponent(new StringType("related"), new UriType(fullUrl));
                    entry.addLink(link);

                    bundle.addEntry(entry);
                } else {
                    // new set of duplicates
                    curDupId = duplicateId;
                    refRecordUrl = fullUrl;
                }
            }
        } finally {
            in.close();
        }
    }
}

From source file: org.n52.wps.csv2wiki.CSV2TWikiProcess.java

public String transform(InputStream csv) throws IOException {
    CSVParser parser = CSVFormat.DEFAULT.parse(new InputStreamReader(csv));
    return transformWithParser(parser);
}

From source file: org.n52.wps.csv2wiki.CSV2TWikiProcess.java

public String transform(String input) throws IOException {
    CSVParser parser = CSVFormat.DEFAULT.parse(new StringReader(input));
    return transformWithParser(parser);
}

From source file: org.nuxeo.ecm.csv.core.CSVImporterWork.java

@Override
public void work() {
    TransientStore store = getStore();
    setStatus("Importing");
    openUserSession();
    CSVFormat csvFormat = CSVFormat.DEFAULT.withHeader().withEscape(options.getEscapeCharacter())
            .withCommentMarker(options.getCommentMarker());
    try (Reader in = newReader(getBlob()); CSVParser parser = csvFormat.parse(in)) {
        doImport(parser);
    } catch (IOException e) {
        logError(0, "Error while doing the import: %s", LABEL_CSV_IMPORTER_ERROR_DURING_IMPORT, e.getMessage());
        log.debug(e, e);
    }
    store.putParameter(id, "logs", importLogs);
    if (options.sendEmail()) {
        setStatus("Sending email");
        sendMail();
    }
    setStatus(null);
}
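
A minimal stand-alone sketch of the same format configuration, with hypothetical escape and comment characters in place of the values read from options:

import java.io.Reader;
import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class EscapeCommentExample {
    public static void main(String[] args) throws Exception {
        CSVFormat format = CSVFormat.DEFAULT.withHeader()
                .withEscape('\\')          // hypothetical escape character
                .withCommentMarker('#');   // hypothetical comment marker
        String csv = "name,value\n# this line is skipped as a comment\nfoo,1\n";
        try (Reader in = new StringReader(csv); CSVParser parser = format.parse(in)) {
            for (CSVRecord record : parser) {
                System.out.println(record.get("name") + "=" + record.get("value"));
            }
        }
    }
}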

From source file: org.nuxeo.ecm.directory.DirectoryCSVLoader.java

/**
 * Loads the CSV data file based on the provided schema, and creates the corresponding entries using the provided
 * loader.
 *
 * @param dataFileName the file name containing CSV data
 * @param delimiter the CSV column separator
 * @param schema the data schema
 * @param loader the actual consumer of loaded rows
 * @since 8.4
 */
public static void loadData(String dataFileName, char delimiter, Schema schema,
        Consumer<Map<String, Object>> loader) throws DirectoryException {
    try (InputStream in = getResource(dataFileName); //
            CSVParser csvParser = new CSVParser(new InputStreamReader(in, "UTF-8"),
                    CSVFormat.DEFAULT.withDelimiter(delimiter).withHeader())) {
        Map<String, Integer> header = csvParser.getHeaderMap();

        List<Field> fields = new ArrayList<>();
        for (String columnName : header.keySet()) {
            Field field = schema.getField(columnName.trim());
            if (field == null) {
                throw new DirectoryException(
                        "Column not found: " + columnName + " in schema: " + schema.getName());
            }
            fields.add(field);
        }

        int lineno = 1; // header was first line
        for (CSVRecord record : csvParser) {
            lineno++;
            if (record.size() == 0 || record.size() == 1 && StringUtils.isBlank(record.get(0))) {
                // NXP-2538: allow columns with only one value but skip empty lines
                continue;
            }
            if (!record.isConsistent()) {
                log.error("Invalid column count while reading CSV file: " + dataFileName + ", line: " + lineno
                        + ", values: " + record);
                continue;
            }

            Map<String, Object> map = new HashMap<String, Object>();
            for (int i = 0; i < header.size(); i++) {
                Field field = fields.get(i);
                String value = record.get(i);
                Object v = CSV_NULL_MARKER.equals(value) ? null : decode(field, value);
                map.put(field.getName().getPrefixedName(), v);
            }
            loader.accept(map);
        }
    } catch (IOException e) {
        throw new DirectoryException("Read error while reading data file: " + dataFileName, e);
    }
}
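
The loop above pairs fields.get(i) with record.get(i) by position, which works because CSVParser.getHeaderMap() iterates in column order. A minimal sketch illustrating that mapping with made-up data:

import java.io.StringReader;
import java.util.Map;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;

public class HeaderMapExample {
    public static void main(String[] args) throws Exception {
        try (CSVParser parser = new CSVParser(new StringReader("id,label\n1,foo\n"),
                CSVFormat.DEFAULT.withDelimiter(',').withHeader())) {
            // getHeaderMap() maps column names to 0-based indexes, in column order.
            for (Map.Entry<String, Integer> column : parser.getHeaderMap().entrySet()) {
                System.out.println(column.getKey() + " -> " + column.getValue());
            }
        }
    }
}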