Example usage for com.google.common.net MediaType CSV_UTF_8

Introduction

This page collects example usages of com.google.common.net MediaType CSV_UTF_8, Guava's constant for the text/csv media type with a UTF-8 charset parameter.

Prototype

public static final MediaType CSV_UTF_8

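For orientation, here is a minimal self-contained sketch (illustrative only; the class name is hypothetical and not from any of the sources below) showing what the constant carries:

import com.google.common.net.MediaType;

public class CsvMediaTypeExample {
    public static void main(String[] args) {
        // the full media type, including the charset parameter
        System.out.println(MediaType.CSV_UTF_8);                     // text/csv; charset=utf-8

        // the bare type/subtype, useful for comparisons
        System.out.println(MediaType.CSV_UTF_8.withoutParameters()); // text/csv

        // the charset is exposed as a Guava Optional<Charset>
        System.out.println(MediaType.CSV_UTF_8.charset().get());     // UTF-8
    }
}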

Usage

From source file: org.haiku.haikudepotserver.pkg.job.PkgIconImportArchiveJobRunner.java

@Override
public void run(JobService jobService, PkgIconImportArchiveJobSpecification specification)
        throws IOException, JobRunnerException {

    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(),
            "missing input data guid on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(specification.getGuid(), "download",
            MediaType.CSV_UTF_8.toString());

    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService
            .tryObtainData(specification.getInputDataGuid());

    if (!jobDataWithByteSourceOptional.isPresent()) {
        throw new IllegalStateException(
                "the job data was not able to be found for guid; " + specification.getInputDataGuid());
    }

    if (!serverRuntime.performInTransaction(() -> {

        try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
                OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream, StandardCharsets.UTF_8);
                CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

            String[] headings = new String[] { "path", "action", "message" };
            writer.writeNext(headings);

            // make a first sweep to delete all existing icons for packages in the spreadsheet.

            try (InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
                    GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream);
                    TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(gzipInputStream)) {
                clearPackagesIconsAppearingInArchive(tarArchiveInputStream, writer);
            }

            // now load the icons in.

            try (InputStream inputStream = jobDataWithByteSourceOptional.get().getByteSource().openStream();
                    GZIPInputStream gzipInputStream = new GZIPInputStream(inputStream);
                    TarArchiveInputStream tarArchiveInputStream = new TarArchiveInputStream(gzipInputStream)) {
                processEntriesFromArchive(tarArchiveInputStream, writer);
            }

            return true;
        } catch (IOException e) {
            LOGGER.error("unable to complete job; ", e);
        }

        return false;
    })) {
        throw new JobRunnerException("unable to complete job");
    }
}
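
Each of the haikudepotserver job runners on this page follows the same pattern: register the generated output against the job with MediaType.CSV_UTF_8.toString(), then open the byte sink and stream CSV rows into it. Note also that the gzipped tar archive is consumed twice (one sweep to clear existing icons, one to load the new ones), so the ByteSource is reopened for each pass.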

From source file: org.haiku.haikudepotserver.pkg.job.PkgVersionLocalizationCoverageExportSpreadsheetJobRunner.java

@Override
public void run(final JobService jobService,
        final PkgVersionLocalizationCoverageExportSpreadsheetJobSpecification specification)
        throws IOException, JobRunnerException {

    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);

    final ObjectContext context = serverRuntime.newContext();

    final List<NaturalLanguage> naturalLanguages = getNaturalLanguages(context);
    final List<Architecture> architectures = Architecture.getAllExceptByCode(context,
            Collections.singleton(Architecture.CODE_SOURCE));

    if (naturalLanguages.isEmpty()) {
        throw new RuntimeException("there appear to be no natural languages in the system");
    }

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(specification.getGuid(), "download",
            MediaType.CSV_UTF_8.toString());

    try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
            OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream, StandardCharsets.UTF_8);
            CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

        final String[] cells = new String[4 + naturalLanguages.size()];

        // headers

        {
            int c = 0;

            cells[c++] = "pkg-name";
            cells[c++] = "repository";
            cells[c++] = "architecture";
            cells[c++] = "latest-version-coordinates";

            for (NaturalLanguage naturalLanguage : naturalLanguages) {
                cells[c++] = naturalLanguage.getCode();
            }
        }

        long startMs = System.currentTimeMillis();

        writer.writeNext(cells);

        // stream out the packages.

        final long expectedTotal = pkgService.totalPkg(context, false);
        final AtomicLong counter = new AtomicLong(0);

        LOGGER.info("will produce package version localization report for {} packages", expectedTotal);

        long count = pkgService.eachPkg(context, false, // allow source only.
                pkg -> {

                    for (Repository repository : repositoryService.getRepositoriesForPkg(context, pkg)) {

                        for (Architecture architecture : architectures) {

                            Optional<PkgVersion> pkgVersionOptional = pkgService.getLatestPkgVersionForPkg(
                                    context, pkg, repository, Collections.singletonList(architecture));

                            if (pkgVersionOptional.isPresent()) {
                                int c = 0;

                                cells[c++] = pkg.getName();
                                cells[c++] = pkgVersionOptional.get().getRepositorySource().getRepository()
                                        .getCode();
                                cells[c++] = architecture.getCode();
                                cells[c++] = pkgVersionOptional.get().toVersionCoordinates().toString();

                                for (NaturalLanguage naturalLanguage : naturalLanguages) {
                                    Optional<PkgVersionLocalization> pkgVersionLocalizationOptional = pkgVersionOptional
                                            .get().getPkgVersionLocalization(naturalLanguage);
                                    cells[c++] = pkgVersionLocalizationOptional.isPresent() ? MARKER : "";
                                }

                                writer.writeNext(cells);

                            }
                        }

                    }

                    jobService.setJobProgressPercent(specification.getGuid(),
                            (int) ((100 * counter.incrementAndGet()) / expectedTotal));

                    return true; // keep going!
                });

        LOGGER.info("did produce pkg version localization coverage spreadsheet report for {} packages in {}ms",
                count, System.currentTimeMillis() - startMs);

    }

}
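
Note that the single cells array is reused for every row. This is safe because CSVWriter.writeNext(...) writes the values out immediately rather than retaining a reference to the array.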

From source file: org.haiku.haikudepotserver.pkg.job.PkgScreenshotImportArchiveJobRunner.java

@Override
public void run(JobService jobService, PkgScreenshotImportArchiveJobSpecification specification)
        throws IOException, JobRunnerException {

    Preconditions.checkArgument(null != jobService);
    Preconditions.checkArgument(null != specification);
    Preconditions.checkArgument(null != specification.getInputDataGuid(),
            "missing input data guid on specification");
    Preconditions.checkArgument(null != specification.getImportStrategy(),
            "missing import strategy on specification");

    // this will register the outbound data against the job.
    JobDataWithByteSink jobDataWithByteSink = jobService.storeGeneratedData(specification.getGuid(), "download",
            MediaType.CSV_UTF_8.toString());

    Optional<JobDataWithByteSource> jobDataWithByteSourceOptional = jobService
            .tryObtainData(specification.getInputDataGuid());

    if (!jobDataWithByteSourceOptional.isPresent()) {
        throw new IllegalStateException(
                "the job data was not able to be found for guid; " + specification.getInputDataGuid());
    }

    if (!serverRuntime.performInTransaction(() -> {
        try (OutputStream outputStream = jobDataWithByteSink.getByteSink().openBufferedStream();
                OutputStreamWriter outputStreamWriter = new OutputStreamWriter(outputStream, StandardCharsets.UTF_8);
                CSVWriter writer = new CSVWriter(outputStreamWriter, ',')) {

            Map<String, ScreenshotImportMetadatas> metadatas = new HashMap<>();

            writer.writeNext(new String[] { "path", "pkg-name", "action", "message", "code" });

            // sweep through and collect meta-data about the packages in the tar file.
            LOGGER.info("will collect data about packages' screenshots from the archive");
            consumeScreenshotArchiveEntries(jobDataWithByteSourceOptional.get().getByteSource(),
                    (ae) -> collectScreenshotMetadataFromArchive(metadatas, ae.getArchiveInputStream(),
                            ae.getArchiveEntry(), ae.getPkgName(), ae.getOrder()));
            LOGGER.info("did collect data about {} packages' screenshots from the archive", metadatas.size());

            LOGGER.info("will collect data about persisted packages' screenshots");
            collectPersistedScreenshotMetadata(metadatas);
            LOGGER.info("did collect data about persisted packages' screenshots");

            if (specification
                    .getImportStrategy() == PkgScreenshotImportArchiveJobSpecification.ImportStrategy.REPLACE) {
                LOGGER.info("will delete persisted screenshots that are absent from the archive");
                int deleted = deletePersistedScreenshotsThatAreNotPresentInArchiveAndReport(writer,
                        metadatas.values());
                LOGGER.info("did delete {} persisted screenshots that are absent from the archive", deleted);
            }

            blendInArtificialOrderings(metadatas.values());

            // sweep through the archive again and load in those screenshots that are not already present.
            // The ordering of the inbound data should be preserved.
            LOGGER.info("will load screenshots from archive for {} packages", metadatas.size());
            consumeScreenshotArchiveEntries(jobDataWithByteSourceOptional.get().getByteSource(),
                    (ae) -> importScreenshotsFromArchiveAndReport(writer, metadatas.get(ae.getPkgName()),
                            ae.getArchiveInputStream(), ae.getArchiveEntry(), ae.getPkgName(), ae.getOrder()));
            LOGGER.info("did load screenshots from archive for {} packages", metadatas.size());
            return true;
        } catch (IOException e) {
            LOGGER.error("unable to complete the job", e);
        }

        return false;
    })) {
        throw new JobRunnerException("unable to complete job");
    }
}

From source file: uk.org.rbc1b.roms.controller.report.ReportsController.java

/**
 * Run a fixed report, returning the data in a downloadable csv format.
 * @param reportId report id
 * @param response servlet response to output the csv data to directly
 * @throws IOException on failure to write to output stream
 */
@RequestMapping(value = "fixed/{reportId}/csv", method = RequestMethod.GET)
@PreAuthorize("hasPermission('REPORT', 'READ')")
public void downloadCsvReport(@PathVariable Integer reportId, HttpServletResponse response) throws IOException {

    FixedReport fixedReport = reportDao.findFixedReport(reportId);
    if (fixedReport == null) {
        throw new ResourceNotFoundException("No fixed report #" + reportId);
    }

    ReportResults reportResults;
    try {
        reportResults = extractResults(fixedReport.getQuery());
    } catch (SQLException e) {
        throw new IllegalStateException("Failed to extract report data. Message: [" + e.getMessage() + "]", e);
    }

    String[] headers = reportResults.columnNames.toArray(new String[reportResults.columnNames.size()]);

    List<String[]> records = new ArrayList<String[]>();
    for (List<String> reportRow : reportResults.resultRows) {
        records.add(reportRow.toArray(new String[reportRow.size()]));
    }

    String fileName = "edifice-report-" + fixedReport.getName().replace(" ", "-").toLowerCase() + ".csv";

    response.setContentType(MediaType.CSV_UTF_8.toString());
    response.setHeader("Content-Disposition", "attachment; filename=\"" + fileName + "\"");

    OutputStream output = response.getOutputStream();
    try (CSVWriter writer = new CSVWriter(new OutputStreamWriter(output, StandardCharsets.UTF_8), '\u0009')) {
        writer.writeNext(headers);
        writer.writeAll(records);
    }

}
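
A quirk worth noting: the response declares text/csv; charset=utf-8, but the CSVWriter is constructed with '\u0009' (the tab character) as its separator, so the downloaded .csv file is in fact tab-separated.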

From source file: org.graylog2.restclient.models.UniversalSearch.java

public InputStream searchAsCsv(Set<String> selectedFields) throws IOException, APIException {
    final ApiRequestBuilder<String> builder = getSearchRequestBuilder(String.class, Integer.MAX_VALUE,
            selectedFields);
    return builder.accept(MediaType.CSV_UTF_8).timeout(KEITH, TimeUnit.SECONDS).expect(200, 400)
            .executeStreaming();
}
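
Unlike the other examples, which set CSV_UTF_8 as a response Content-Type, this one hands it to the request builder's accept(...), so it is sent as the Accept header of an outbound API call asking the server to stream the search result back as CSV.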

From source file: org.haiku.haikudepotserver.job.controller.JobController.java

/**
 * <p>This URL can be used to download job data that has resulted from a job being run.</p>
 */

@RequestMapping(value = "/" + SEGMENT_JOBDATA + "/{" + KEY_GUID + "}/"
        + SEGMENT_DOWNLOAD, method = RequestMethod.GET)
public void downloadGeneratedData(HttpServletRequest request, HttpServletResponse response,
        @PathVariable(value = KEY_GUID) String guid) throws IOException {

    Preconditions.checkArgument(PATTERN_GUID.matcher(guid).matches(),
            "the supplied guid does not match the required pattern");

    ObjectContext context = serverRuntime.newContext();

    JobSnapshot job = jobService.tryGetJobForData(guid).orElseThrow(() -> {
        LOGGER.warn("attempt to access job data {} for which no job exists", guid);
        return new JobDataAuthorizationFailure();
    });

    // If there is no user who is assigned to the job then the job is for nobody in particular and is thereby
    // secured by the GUID of the job's data; if you know the GUID then you can have the data.

    if (!Strings.isNullOrEmpty(job.getOwnerUserNickname())) {

        User user = tryObtainAuthenticatedUser(context).orElseThrow(() -> {
            LOGGER.warn("attempt to obtain job data {} with no authenticated user", guid);
            return new JobDataAuthorizationFailure();
        });

        User ownerUser = User.tryGetByNickname(context, job.getOwnerUserNickname()).orElseThrow(() -> {
            LOGGER.warn("owner of job does not seem to exist; {}", job.getOwnerUserNickname());
            return new JobDataAuthorizationFailure();
        });

        if (!authorizationService.check(context, user, ownerUser, Permission.USER_VIEWJOBS)) {
            LOGGER.warn("attempt to access jobs view for; {}", job.toString());
            throw new JobDataAuthorizationFailure();
        }
    } else {
        LOGGER.debug("access to job [{}] allowed for unauthenticated access", job.toString());
    }

    JobDataWithByteSource jobDataWithByteSource = jobService.tryObtainData(guid).orElseThrow(() -> {
        LOGGER.warn("requested job data {} not found", guid);
        return new JobDataAuthorizationFailure();
    });

    // finally access has been checked and the logic can move onto actual
    // delivery of the material.

    JobData jobData = jobDataWithByteSource.getJobData();

    response.setContentType(MediaType.CSV_UTF_8.toString());
    response.setHeader(HttpHeaders.CONTENT_DISPOSITION,
            "attachment; filename=" + jobService.deriveDataFilename(guid));
    response.setDateHeader(HttpHeaders.EXPIRES, 0);
    response.setHeader(HttpHeaders.CACHE_CONTROL, "no-cache");

    // now switch to async for the delivery of the data.

    AsyncContext async = request.startAsync();
    async.setTimeout(TIMEOUT_DOWNLOAD_MILLIS);
    ServletOutputStream outputStream = response.getOutputStream();
    outputStream.setWriteListener(new JobDataWriteListener(guid, jobService, async, outputStream));

    LOGGER.info("did start async stream job data; {}", guid);

}
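
The Content-Disposition filename comes from jobService.deriveDataFilename(guid); the LocalJobServiceImpl example at the end of this page shows how that method selects the .csv extension by matching the job data's media type code against CSV_UTF_8.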

From source file: controllers.SearchController.java

public Result exportAsCsv(String q, String filter, String rangeType, int relative, String from, String to,
        String keyword, String fields) {
    UniversalSearch search;
    try {
        search = getSearch(q, filter.isEmpty() ? null : filter, rangeType, relative, from, to, keyword, 0,
                UniversalSearch.DEFAULT_SORT);
    } catch (InvalidRangeParametersException e2) {
        return status(400, views.html.errors.error.render("Invalid range parameters provided.", e2, request()));
    } catch (IllegalArgumentException e1) {
        return status(400, views.html.errors.error.render("Invalid range type provided.", e1, request()));
    }

    final InputStream stream;
    try {
        Set<String> selectedFields = getSelectedFields(fields);
        stream = search.searchAsCsv(selectedFields);
    } catch (IOException e) {
        return status(504, views.html.errors.error.render(ApiClient.ERROR_MSG_IO, e, request()));
    } catch (APIException e) {
        String message = "There was a problem with your search. We expected HTTP 200, but got a HTTP "
                + e.getHttpCode() + ".";
        return status(504, views.html.errors.error.render(message, e, request()));
    }

    response().setContentType(MediaType.CSV_UTF_8.toString());
    response().setHeader("Content-Disposition", "attachment; filename=graylog-searchresult.csv");
    return ok(stream);
}
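
This Play controller is the caller of the searchAsCsv(...) method from the UniversalSearch example above: the stream obtained with an Accept header of text/csv is relayed to the browser with the same media type declared as the response Content-Type.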

From source file: org.haiku.haikudepotserver.job.LocalJobServiceImpl.java

@Override
public String deriveDataFilename(String jobDataGuid) {
    Preconditions.checkArgument(!Strings.isNullOrEmpty(jobDataGuid));

    String descriptor = "jobdata";
    String extension = "dat";

    Optional<JobData> jobDataOptional = tryGetData(jobDataGuid);

    if (jobDataOptional.isPresent()) {

        JobData jobData = jobDataOptional.get();
        Optional<? extends JobSnapshot> jobOptional = tryGetJobForData(jobDataGuid);

        if (jobOptional.isPresent()) {
            descriptor = jobOptional.get().getJobTypeCode();
        }

        // TODO: get the extensions from a file etc...
        if (!Strings.isNullOrEmpty(jobData.getMediaTypeCode())) {
            if (jobData.getMediaTypeCode().startsWith(MediaType.CSV_UTF_8.withoutParameters().toString())) {
                extension = "csv";
            }

            if (jobData.getMediaTypeCode().equals(MediaType.ZIP.withoutParameters().toString())) {
                extension = "zip";
            }

            if (jobData.getMediaTypeCode().equals(MediaType.TAR.withoutParameters().toString())) {
                extension = "tgz";
            }

            if (jobData.getMediaTypeCode().equals(MediaType.PLAIN_TEXT_UTF_8.withoutParameters().toString())) {
                extension = "txt";
            }
        }
    }

    return String.format("hds_%s_%s_%s.%s", descriptor,
            DateTimeHelper.create14DigitDateTimeFormat().format(Instant.now()), jobDataGuid.substring(0, 4),
            extension);
}