List of usage examples for org.apache.commons.csv CSVFormat.DEFAULT
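Before the per-project examples, a minimal, self-contained sketch of the round trip with CSVFormat.DEFAULT (comma delimiter, double-quote quoting, CRLF record separator): parsing records from a Reader, then writing records back out through a CSVPrinter. This is a generic illustration written for this page, not code taken from any of the projects below.

import java.io.StringReader;
import java.io.StringWriter;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;
import org.apache.commons.csv.CSVRecord;

public class CsvDefaultRoundTrip {
    public static void main(String[] args) throws Exception {
        // Parse: CSVFormat.DEFAULT.parse(Reader) returns a parser that is Iterable<CSVRecord>
        Iterable<CSVRecord> records = CSVFormat.DEFAULT.parse(new StringReader("a,b\n1,2"));
        for (CSVRecord record : records) {
            System.out.println(record.get(0) + " | " + record.get(1));
        }
        // Print: write records back out in the same format
        StringWriter out = new StringWriter();
        try (CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT)) {
            printer.printRecord("a", "b");
            printer.printRecord(1, 2);
        }
        System.out.print(out);
    }
}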
From source file:edu.uiowa.icts.util.SummarizeListTest.java
@Test
public void getSummeryFromCSVFile() throws IOException {
    File file = new File("src/test/resources/ClinicalData-BLCA-table.csv");
    assertTrue(file.isFile());
    Reader in = new BufferedReader(new InputStreamReader(new FileInputStream(file)));
    Iterable<CSVRecord> records = CSVFormat.DEFAULT
            .withHeader("barcode", "Clinical:days_to_death", "Clinical:vital_status",
                    "Clinical:anatomic_treatment_site", "Clinical:bcr_aliquot_barcode",
                    "Clinical:bcr_radiation_barcode", "Clinical:bcr_radiation_uuid",
                    "Clinical:analyte_type")
            .withSkipHeaderRecord(true).parse(in);
    List<String> values = new ArrayList<String>();
    List<String> barcodeValues = new ArrayList<String>();
    List<String> vitalStatus = new ArrayList<String>();
    for (CSVRecord record : records) {
        values.add(record.get("Clinical:bcr_aliquot_barcode"));
        barcodeValues.add(record.get("barcode"));
        vitalStatus.add(record.get("Clinical:vital_status"));
    }
    SummarizeList sl = new SummarizeList(values, null, 2);
    assertEquals("Identifier: {# of Unique ID's = ".concat(new Integer(values.size()).toString()).concat("}"),
            sl.getSummary());
    sl = new SummarizeList(barcodeValues, null, 2);
    assertEquals(
            "Identifier: {# of Unique ID's = ".concat(new Integer(barcodeValues.size()).toString()).concat("}"),
            sl.getSummary());
    sl = new SummarizeList(vitalStatus, null, 2);
    assertEquals("Categorical: {LIVING=94, DECEASED=34}", sl.getSummary());
}
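The test above restates every column name in withHeader(...) and then skips the file's own header row. When the names in the file are usable as-is, Commons CSV 1.4 and later can take them from the first record instead via withFirstRecordAsHeader(). A minimal sketch of that variant; the sample data and column values here are made up for illustration:

import java.io.Reader;
import java.io.StringReader;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVRecord;

public class FirstRecordHeaderExample {
    public static void main(String[] args) throws Exception {
        Reader in = new StringReader("barcode,vital_status\nTCGA-01,LIVING\nTCGA-02,DECEASED");
        // The first record supplies the column names, so fields can be accessed by name
        for (CSVRecord record : CSVFormat.DEFAULT.withFirstRecordAsHeader().parse(in)) {
            System.out.println(record.get("barcode") + " -> " + record.get("vital_status"));
        }
    }
}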
From source file:com.denkbares.semanticcore.utils.ResultTableModel.java
public String toCSV() throws IOException {
    StringWriter out = new StringWriter();
    CSVPrinter printer = CSVFormat.DEFAULT.withHeader(variables.toArray(new String[variables.size()]))
            .print(out);
    for (TableRow row : rows) {
        List<Object> values = new ArrayList<>(variables.size());
        for (String variable : variables) {
            Value value = row.getValue(variable);
            values.add(value == null ? null : value.stringValue());
        }
        printer.printRecord(values);
    }
    return out.toString();
}
From source file:cz.lbenda.dataman.db.ExportTableData.java
/**
 * Write rows to a CSV file.
 * @param sqlQueryRows rows to write
 * @param writer where the data is written
 */
public static void writeSqlQueryRowsToCSV(SQLQueryRows sqlQueryRows, Writer writer) throws IOException {
    CSVFormat csvFileFormat = CSVFormat.DEFAULT.withRecordSeparator(Constants.CSV_NEW_LINE_SEPARATOR);
    CSVPrinter csvFilePrinter = new CSVPrinter(writer, csvFileFormat);
    csvFilePrinter.printRecord(
            sqlQueryRows.getMetaData().getColumns().stream().map(ColumnDesc::getName).toArray());
    for (RowDesc row : sqlQueryRows.getRows()) {
        csvFilePrinter.printRecord(
                sqlQueryRows.getMetaData().getColumns().stream().map(row::getColumnValueStr).toArray());
    }
}
From source file:com.rcv.ResultsWriter.java
private void generateSummarySpreadsheet(Map<Integer, Map<String, BigDecimal>> roundTallies, String precinct,
        String outputPath) throws IOException {
    String csvPath = outputPath + ".csv";
    Logger.log(Level.INFO, "Generating summary spreadsheets: %s...", csvPath);

    // Get all candidates sorted by their first round tally. This determines the display order.
    // container for firstRoundTally
    Map<String, BigDecimal> firstRoundTally = roundTallies.get(1);
    // candidates sorted by first round tally
    List<String> sortedCandidates = sortCandidatesByTally(firstRoundTally);

    // totalActiveVotesPerRound is a map of round to total votes cast in each round
    Map<Integer, BigDecimal> totalActiveVotesPerRound = new HashMap<>();
    // round indexes over all rounds plus final results round
    for (int round = 1; round <= numRounds; round++) {
        // tallies is a map of candidate to tally for the current round
        Map<String, BigDecimal> tallies = roundTallies.get(round);
        // total will contain total votes for all candidates in this round;
        // this is used for calculating other derived data
        BigDecimal total = BigDecimal.ZERO;
        // tally indexes over all tallies for the current round
        for (BigDecimal tally : tallies.values()) {
            total = total.add(tally);
        }
        totalActiveVotesPerRound.put(round, total);
    }

    // csvPrinter will be used to write output to the csv file
    CSVPrinter csvPrinter;
    try {
        BufferedWriter writer = Files.newBufferedWriter(Paths.get(csvPath));
        csvPrinter = new CSVPrinter(writer, CSVFormat.DEFAULT);
    } catch (IOException exception) {
        Logger.log(Level.SEVERE, "Error creating CSV file: %s\n%s", csvPath, exception.toString());
        throw exception;
    }

    // print contest info
    addHeaderRows(csvPrinter, precinct);

    // add a row header for the round column labels
    csvPrinter.print("Rounds");
    // round indexes over all rounds
    for (int round = 1; round <= numRounds; round++) {
        // label string will have the actual text which goes in the cell
        String label = String.format("Round %d", round);
        // cell for round label
        csvPrinter.print(label);
    }
    csvPrinter.println();

    // actions don't make sense in individual precinct results
    if (precinct == null || precinct.isEmpty()) {
        addActionRows(csvPrinter);
    }

    final BigDecimal totalActiveVotesFirstRound = totalActiveVotesPerRound.get(1);

    // For each candidate, for each round, output total votes.
    // candidate indexes over all candidates
    for (String candidate : sortedCandidates) {
        // show each candidate row with their totals for each round
        // text for the candidate name
        String candidateDisplayName = this.config.getNameForCandidateID(candidate);
        csvPrinter.print(candidateDisplayName);
        // round indexes over all rounds
        for (int round = 1; round <= numRounds; round++) {
            // vote tally this round
            BigDecimal thisRoundTally = roundTallies.get(round).get(candidate);
            // not all candidates may have a tally in every round
            if (thisRoundTally == null) {
                thisRoundTally = BigDecimal.ZERO;
            }
            // total votes cell
            csvPrinter.print(thisRoundTally.toString());
        }
        // advance to next line
        csvPrinter.println();
    }

    // row for the inactive CVR counts
    // inactive CVR header cell
    csvPrinter.print("Inactive ballots");
    // round indexes through all rounds
    for (int round = 1; round <= numRounds; round++) {
        // count of votes inactive this round
        BigDecimal thisRoundInactive = BigDecimal.ZERO;
        if (round > 1) {
            // Exhausted count is the difference between the total votes in round 1 and the total
            // votes in the current round.
            thisRoundInactive = totalActiveVotesFirstRound.subtract(totalActiveVotesPerRound.get(round))
                    .subtract(roundToResidualSurplus.get(round));
        }
        // total votes cell
        csvPrinter.print(thisRoundInactive.toString());
    }
    csvPrinter.println();

    // row for residual surplus (if needed)
    // We check if we accumulated any residual surplus over the course of the tabulation by testing
    // whether the value in the final round is positive.
    if (roundToResidualSurplus.get(numRounds).signum() == 1) {
        csvPrinter.print("Residual surplus");
        for (int round = 1; round <= numRounds; round++) {
            csvPrinter.print(roundToResidualSurplus.get(round).toString());
        }
        csvPrinter.println();
    }

    // write csv to disk
    try {
        csvPrinter.flush();
        csvPrinter.close();
    } catch (IOException exception) {
        Logger.log(Level.SEVERE, "Error saving file: %s\n%s", outputPath, exception.toString());
        throw exception;
    }
}
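The ResultsWriter example above assembles each row cell by cell instead of calling printRecord: CSVPrinter.print(Object) emits a single field on the current record, and println() terminates the record. A minimal sketch of that style, detached from the tabulation logic above:

import java.io.StringWriter;
import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class CellByCellExample {
    public static void main(String[] args) throws Exception {
        StringWriter out = new StringWriter();
        try (CSVPrinter printer = new CSVPrinter(out, CSVFormat.DEFAULT)) {
            printer.print("Rounds");   // one field
            printer.print("Round 1");  // another field on the same record
            printer.println();         // end the record
            printer.print("Candidate A");
            printer.print("42");
            printer.println();
        }
        System.out.print(out);
    }
}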
From source file:io.ecarf.core.cloud.task.processor.reason.phase0.DoReasonTask3.java
private int inferAndSaveTriplesToFile(Term term, List<String> select, Set<Triple> schemaTriples,
        BigInteger rows, String table, PrintWriter writer) throws IOException {
    int inferredTriples = 0;
    int failedTriples = 0;
    // loop through the instance triples probably stored in a file and generate all the triples
    // matching the schema triples set
    try (BufferedReader r = new BufferedReader(new FileReader(term.getFilename()), Constants.GZIP_BUF_SIZE)) {
        Iterable<CSVRecord> records = CSVFormat.DEFAULT.parse(r);
        // records will contain lots of duplicates
        Set<String> inferredAlready = new HashSet<String>();
        try {
            for (CSVRecord record : records) {
                String values = ((select.size() == 1) ? record.get(0)
                        : StringUtils.join(record.values(), ','));
                if (!inferredAlready.contains(values)) {
                    inferredAlready.add(values);
                    NTriple instanceTriple = new NTriple();
                    if (select.size() == 1) {
                        instanceTriple.set(select.get(0), record.get(0));
                    } else {
                        instanceTriple.set(select, record.values());
                    }
                    for (Triple schemaTriple : schemaTriples) {
                        Rule rule = GenericRule.getRule(schemaTriple);
                        Triple inferredTriple = rule.head(schemaTriple, instanceTriple);
                        writer.println(inferredTriple.toCsv());
                        inferredTriples++;
                    }
                    // this is just to avoid any memory issues
                    if (inferredAlready.size() > MAX_CACHE) {
                        inferredAlready.clear();
                        log.info("Cleared cache of inferred terms");
                    }
                }
            }
        } catch (Exception e) {
            log.error("Failed to parse selected terms", e);
            failedTriples++;
        }
    }
    log.info("\nSelect Triples: " + rows + ", Inferred: " + inferredTriples + ", Triples for term: " + term
            + ", Failed Triples: " + failedTriples);
    return inferredTriples;
}
From source file:com.streamsets.pipeline.lib.parser.delimited.TestDelimitedCharDataParser.java
@Test
public void testParseIgnoreHeaderWithOffset2() throws Exception {
    OverrunReader reader = new OverrunReader(new StringReader("A,B\na,b\ne,f"), 1000, true, false);
    DataParser parser = new DelimitedCharDataParser(getContext(), "id", reader, 8, 0, CSVFormat.DEFAULT,
            CsvHeader.IGNORE_HEADER, -1, CsvRecordType.LIST);
    Assert.assertEquals("8", parser.getOffset());
    Record record = parser.parse();
    Assert.assertNotNull(record);
    Assert.assertEquals("id::8", record.getHeader().getSourceId());
    Assert.assertEquals("e",
            record.get().getValueAsList().get(0).getValueAsMap().get("value").getValueAsString());
    Assert.assertFalse(record.has("[0]/header"));
    Assert.assertEquals("f",
            record.get().getValueAsList().get(1).getValueAsMap().get("value").getValueAsString());
    Assert.assertFalse(record.has("[1]/header"));
    Assert.assertEquals("11", parser.getOffset());
    record = parser.parse();
    Assert.assertNull(record);
    Assert.assertEquals("-1", parser.getOffset());
    parser.close();
}
From source file:de.tudarmstadt.ukp.dkpro.tc.svmhmm.util.SVMHMMUtils.java
/**
 * Given a confusion matrix, writes it in CSV and LaTeX form to the task's output directory,
 * and also prints evaluations (F-measure, Precision, Recall).
 *
 * @param context task context
 * @param confusionMatrix confusion matrix
 * @param filePrefix prefix of output files
 * @throws java.io.IOException
 */
public static void writeOutputResults(TaskContext context, ConfusionMatrix confusionMatrix, String filePrefix)
        throws IOException {
    // storing the results as latex confusion matrix
    String confMatrixFileTex = (filePrefix != null ? filePrefix : "") + "confusionMatrix.tex";
    File evaluationFileLaTeX = new File(
            context.getStorageLocation(Constants.TEST_TASK_OUTPUT_KEY, StorageService.AccessMode.READWRITE),
            confMatrixFileTex);
    FileUtils.writeStringToFile(evaluationFileLaTeX, confusionMatrix.toStringLatex());

    // as CSV confusion matrix
    String confMatrixFileCsv = (filePrefix != null ? filePrefix : "") + "confusionMatrix.csv";
    File evaluationFileCSV = new File(
            context.getStorageLocation(Constants.TEST_TASK_OUTPUT_KEY, StorageService.AccessMode.READWRITE),
            confMatrixFileCsv);
    CSVPrinter csvPrinter = new CSVPrinter(new FileWriter(evaluationFileCSV), CSVFormat.DEFAULT);
    csvPrinter.printRecords(confusionMatrix.toStringMatrix());
    IOUtils.closeQuietly(csvPrinter);

    // and results
    File evaluationFile = new File(
            context.getStorageLocation(Constants.TEST_TASK_OUTPUT_KEY, StorageService.AccessMode.READWRITE),
            new SVMHMMAdapter()
                    .getFrameworkFilename(TCMachineLearningAdapter.AdapterNameEntries.evaluationFile));
    PrintWriter pw = new PrintWriter(evaluationFile);
    pw.println(confusionMatrix.printNiceResults());
    pw.println(confusionMatrix.printLabelPrecRecFm());
    pw.println(confusionMatrix.printClassDistributionGold());
    IOUtils.closeQuietly(pw);
}
From source file:io.ecarf.core.cloud.task.processor.reason.phase0.DoReasonTask4.java
private int inferAndSaveTriplesToFile(Term term, List<String> select, Set<Triple> schemaTriples,
        BigInteger rows, String table, PrintWriter writer) throws IOException {
    int inferredTriples = 0;
    int failedTriples = 0;
    // loop through the instance triples probably stored in a file and generate all the triples
    // matching the schema triples set
    try (BufferedReader r = new BufferedReader(new FileReader(term.getFilename()), Constants.GZIP_BUF_SIZE)) {
        Iterable<CSVRecord> records = CSVFormat.DEFAULT.parse(r);
        // records will contain lots of duplicates
        Set<String> inferredAlready = new HashSet<String>();
        try {
            for (CSVRecord record : records) {
                String values = ((select.size() == 1) ? record.get(0)
                        : StringUtils.join(record.values(), ','));
                if (!inferredAlready.contains(values)) {
                    inferredAlready.add(values);
                    NTriple instanceTriple = new NTriple();
                    if (select.size() == 1) {
                        instanceTriple.set(select.get(0), record.get(0));
                    } else {
                        instanceTriple.set(select, record.values());
                    }
                    for (Triple schemaTriple : schemaTriples) {
                        Rule rule = GenericRule.getRule(schemaTriple);
                        Triple inferredTriple = rule.head(schemaTriple, instanceTriple);
                        writer.println(inferredTriple.toCsv());
                        inferredTriples++;
                    }
                    // this is just to avoid any memory issues
                    if (inferredAlready.size() > MAX_CACHE) {
                        inferredAlready.clear();
                        log.info("Cleared cache of inferred terms");
                    }
                } else {
                    this.duplicates++;
                }
            }
        } catch (Exception e) {
            log.error("Failed to parse selected terms", e);
            failedTriples++;
        }
    }
    log.info("\nSelect Triples: " + rows + ", Inferred: " + inferredTriples + ", Triples for term: " + term
            + ", Failed Triples: " + failedTriples);
    return inferredTriples;
}
From source file:io.swagger.inflector.controllers.SwaggerOperationController.java
@Override
public Response apply(ContainerRequestContext ctx) {
    List<Parameter> parameters = operation.getParameters();
    final RequestContext requestContext = createContext(ctx);
    String path = ctx.getUriInfo().getPath();
    Map<String, Map<String, String>> formMap = new HashMap<String, Map<String, String>>();
    Map<String, File> inputStreams = new HashMap<String, File>();
    Object[] args = new Object[parameters.size() + 1];
    if (parameters != null) {
        int i = 0;
        args[i] = requestContext;
        i += 1;
        List<ValidationMessage> missingParams = new ArrayList<ValidationMessage>();
        UriInfo uri = ctx.getUriInfo();
        String formDataString = null;
        String[] parts = null;
        Set<String> existingKeys = new HashSet<String>();
        for (Iterator<String> x = uri.getQueryParameters().keySet().iterator(); x.hasNext();) {
            existingKeys.add(x.next() + ": qp");
        }
        for (Iterator<String> x = uri.getPathParameters().keySet().iterator(); x.hasNext();) {
            existingKeys.add(x.next() + ": pp");
        }
        for (Iterator<String> x = ctx.getHeaders().keySet().iterator(); x.hasNext();) {
            String key = x.next();
            // if(!commonHeaders.contains(key))
            //     existingKeys.add(key);
        }
        MediaType mt = requestContext.getMediaType();
        for (Parameter p : parameters) {
            Map<String, String> headers = new HashMap<String, String>();
            String name = null;
            if (p instanceof FormParameter) {
                if (formDataString == null) {
                    // can only read stream once
                    if (mt.isCompatible(MediaType.MULTIPART_FORM_DATA_TYPE)) {
                        // get the boundary
                        String boundary = mt.getParameters().get("boundary");
                        if (boundary != null) {
                            try {
                                InputStream output = ctx.getEntityStream();
                                MultipartStream multipartStream = new MultipartStream(output,
                                        boundary.getBytes());
                                boolean nextPart = multipartStream.skipPreamble();
                                while (nextPart) {
                                    String header = multipartStream.readHeaders();
                                    // process headers
                                    if (header != null) {
                                        CSVFormat format = CSVFormat.DEFAULT.withDelimiter(';')
                                                .withRecordSeparator("=");
                                        Iterable<CSVRecord> records = format.parse(new StringReader(header));
                                        for (CSVRecord r : records) {
                                            for (int j = 0; j < r.size(); j++) {
                                                String string = r.get(j);
                                                Iterable<CSVRecord> outerString = CSVFormat.DEFAULT
                                                        .withDelimiter('=').parse(new StringReader(string));
                                                for (CSVRecord outerKvPair : outerString) {
                                                    if (outerKvPair.size() == 2) {
                                                        String key = outerKvPair.get(0).trim();
                                                        String value = outerKvPair.get(1).trim();
                                                        if ("name".equals(key)) {
                                                            name = value;
                                                        }
                                                        headers.put(key, value);
                                                    } else {
                                                        Iterable<CSVRecord> innerString = CSVFormat.DEFAULT
                                                                .withDelimiter(':')
                                                                .parse(new StringReader(string));
                                                        for (CSVRecord innerKVPair : innerString) {
                                                            if (innerKVPair.size() == 2) {
                                                                String key = innerKVPair.get(0).trim();
                                                                String value = innerKVPair.get(1).trim();
                                                                if ("name".equals(key)) {
                                                                    name = value;
                                                                }
                                                                headers.put(key, value);
                                                            }
                                                        }
                                                    }
                                                }
                                                if (name != null) {
                                                    formMap.put(name, headers);
                                                }
                                            }
                                        }
                                    }
                                    String filename = extractFilenameFromHeaders(headers);
                                    if (filename != null) {
                                        try {
                                            File file = new File(Files.createTempDir(), filename);
                                            file.deleteOnExit();
                                            file.getParentFile().deleteOnExit();
                                            FileOutputStream fo = new FileOutputStream(file);
                                            multipartStream.readBodyData(fo);
                                            inputStreams.put(name, file);
                                        } catch (Exception e) {
                                            LOGGER.error("Failed to extract uploaded file", e);
                                        }
                                    } else {
                                        ByteArrayOutputStream bo = new ByteArrayOutputStream();
                                        multipartStream.readBodyData(bo);
                                        String value = bo.toString();
                                        headers.put(name, value);
                                    }
                                    if (name != null) {
                                        formMap.put(name, headers);
                                    }
                                    headers = new HashMap<>();
                                    name = null;
                                    nextPart = multipartStream.readBoundary();
                                }
                            } catch (IOException e) {
                                e.printStackTrace();
                            }
                        }
                    } else {
                        try {
                            formDataString = IOUtils.toString(ctx.getEntityStream(), "UTF-8");
                            parts = formDataString.split("&");
                            for (String part : parts) {
                                String[] kv = part.split("=");
                                existingKeys.add(kv[0] + ": fp");
                            }
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }
                }
            }
        }
        for (Parameter parameter : parameters) {
            String in = parameter.getIn();
            Object o = null;
            try {
                if ("formData".equals(in)) {
                    SerializableParameter sp = (SerializableParameter) parameter;
                    String name = parameter.getName();
                    if (mt.isCompatible(MediaType.MULTIPART_FORM_DATA_TYPE)) {
                        // look in the form map
                        Map<String, String> headers = formMap.get(name);
                        if (headers != null && headers.size() > 0) {
                            if ("file".equals(sp.getType())) {
                                o = inputStreams.get(name);
                            } else {
                                Object obj = headers.get(parameter.getName());
                                if (obj != null) {
                                    JavaType jt = parameterClasses[i];
                                    Class<?> cls = jt.getRawClass();
                                    List<String> os = Arrays.asList(obj.toString());
                                    try {
                                        o = validator.convertAndValidate(os, parameter, cls, definitions);
                                    } catch (ConversionException e) {
                                        missingParams.add(e.getError());
                                    } catch (ValidationException e) {
                                        missingParams.add(e.getValidationMessage());
                                    }
                                }
                            }
                        }
                    } else {
                        if (formDataString != null) {
                            for (String part : parts) {
                                String[] kv = part.split("=");
                                if (kv != null) {
                                    if (kv.length > 0) {
                                        existingKeys.remove(kv[0] + ": fp");
                                    }
                                    if (kv.length == 2) {
                                        // TODO how to handle arrays here?
                                        String key = kv[0];
                                        try {
                                            String value = URLDecoder.decode(kv[1], "utf-8");
                                            if (parameter.getName().equals(key)) {
                                                JavaType jt = parameterClasses[i];
                                                Class<?> cls = jt.getRawClass();
                                                try {
                                                    o = validator.convertAndValidate(Arrays.asList(value),
                                                            parameter, cls, definitions);
                                                } catch (ConversionException e) {
                                                    missingParams.add(e.getError());
                                                } catch (ValidationException e) {
                                                    missingParams.add(e.getValidationMessage());
                                                }
                                            }
                                        } catch (UnsupportedEncodingException e) {
                                            LOGGER.error("unable to decode value for " + key);
                                        }
                                    }
                                }
                            }
                        }
                    }
                } else {
                    try {
                        String paramName = parameter.getName();
                        if ("query".equals(in)) {
                            existingKeys.remove(paramName + ": qp");
                        }
                        if ("path".equals(in)) {
                            existingKeys.remove(paramName + ": pp");
                        }
                        JavaType jt = parameterClasses[i];
                        Class<?> cls = jt.getRawClass();
                        if ("body".equals(in)) {
                            if (ctx.hasEntity()) {
                                BodyParameter body = (BodyParameter) parameter;
                                o = EntityProcessorFactory.readValue(ctx.getMediaType(), ctx.getEntityStream(),
                                        cls);
                                if (o != null) {
                                    validate(o, body.getSchema(), SchemaValidator.Direction.INPUT);
                                }
                            } else if (parameter.getRequired()) {
                                ValidationException e = new ValidationException();
                                e.message(new ValidationMessage()
                                        .message("The input body `" + paramName + "` is required"));
                                throw e;
                            }
                        }
                        if ("query".equals(in)) {
                            o = validator.convertAndValidate(uri.getQueryParameters().get(parameter.getName()),
                                    parameter, cls, definitions);
                        } else if ("path".equals(in)) {
                            o = validator.convertAndValidate(uri.getPathParameters().get(parameter.getName()),
                                    parameter, cls, definitions);
                        } else if ("header".equals(in)) {
                            o = validator.convertAndValidate(ctx.getHeaders().get(parameter.getName()),
                                    parameter, cls, definitions);
                        }
                    } catch (ConversionException e) {
                        missingParams.add(e.getError());
                    } catch (ValidationException e) {
                        missingParams.add(e.getValidationMessage());
                    }
                }
            } catch (NumberFormatException e) {
                LOGGER.error("Couldn't find " + parameter.getName() + " (" + in + ") to "
                        + parameterClasses[i], e);
            }
            args[i] = o;
            i += 1;
        }
        if (existingKeys.size() > 0) {
            LOGGER.debug("unexpected keys: " + existingKeys);
        }
        if (missingParams.size() > 0) {
            StringBuilder builder = new StringBuilder();
            builder.append("Input error");
            if (missingParams.size() > 1) {
                builder.append("s");
            }
            builder.append(": ");
            int count = 0;
            for (ValidationMessage message : missingParams) {
                if (count > 0) {
                    builder.append(", ");
                }
                if (message != null && message.getMessage() != null) {
                    builder.append(message.getMessage());
                } else {
                    builder.append("no additional input");
                }
                count += 1;
            }
            int statusCode = config.getInvalidRequestStatusCode();
            ApiError error = new ApiError().code(statusCode).message(builder.toString());
            throw new ApiException(error);
        }
    }
    try {
        if (method != null) {
            LOGGER.info("calling method " + method + " on controller " + this.controller + " with args "
                    + Arrays.toString(args));
            try {
                Object response = method.invoke(controller, args);
                if (response instanceof ResponseContext) {
                    ResponseContext wrapper = (ResponseContext) response;
                    ResponseBuilder builder = Response.status(wrapper.getStatus());
                    // response headers
                    for (String key : wrapper.getHeaders().keySet()) {
                        List<String> v = wrapper.getHeaders().get(key);
                        if (v.size() == 1) {
                            builder.header(key, v.get(0));
                        } else {
                            builder.header(key, v);
                        }
                    }
                    // entity
                    if (wrapper.getEntity() != null) {
                        builder.entity(wrapper.getEntity());
                        // content type
                        if (wrapper.getContentType() != null) {
                            builder.type(wrapper.getContentType());
                        } else {
                            final ContextResolver<ContentTypeSelector> selector = providersProvider.get()
                                    .getContextResolver(ContentTypeSelector.class, MediaType.WILDCARD_TYPE);
                            if (selector != null) {
                                selector.getContext(getClass()).apply(ctx.getAcceptableMediaTypes(), builder);
                            }
                        }
                        if (operation.getResponses() != null) {
                            String responseCode = String.valueOf(wrapper.getStatus());
                            io.swagger.models.Response responseSchema = operation.getResponses()
                                    .get(responseCode);
                            if (responseSchema == null) {
                                // try default response schema
                                responseSchema = operation.getResponses().get("default");
                            }
                            if (responseSchema != null && responseSchema.getSchema() != null) {
                                validate(wrapper.getEntity(), responseSchema.getSchema(),
                                        SchemaValidator.Direction.OUTPUT);
                            } else {
                                LOGGER.debug("no response schema for code " + responseCode
                                        + " to validate against");
                            }
                        }
                    }
                    return builder.build();
                }
                return Response.ok().entity(response).build();
            } catch (IllegalArgumentException | IllegalAccessException | InvocationTargetException e) {
                for (Throwable cause = e.getCause(); cause != null;) {
                    if (cause instanceof ApiException) {
                        throw (ApiException) cause;
                    }
                    final Throwable next = cause.getCause();
                    cause = next == cause || next == null ? null : next;
                }
                throw new ApiException(ApiErrorUtils.createInternalError(), e);
            }
        }
        Map<String, io.swagger.models.Response> responses = operation.getResponses();
        if (responses != null) {
            String[] keys = new String[responses.keySet().size()];
            Arrays.sort(responses.keySet().toArray(keys));
            int code = 0;
            String defaultKey = null;
            for (String key : keys) {
                if (key.startsWith("2")) {
                    defaultKey = key;
                    code = Integer.parseInt(key);
                    break;
                }
                if ("default".equals(key)) {
                    defaultKey = key;
                    code = 200;
                    break;
                }
                if (key.startsWith("3")) {
                    // we use the 3xx responses as defaults
                    defaultKey = key;
                    code = Integer.parseInt(key);
                }
            }
            if (defaultKey != null) {
                ResponseBuilder builder = Response.status(code);
                io.swagger.models.Response response = responses.get(defaultKey);
                if (response.getHeaders() != null && response.getHeaders().size() > 0) {
                    for (String key : response.getHeaders().keySet()) {
                        Property headerProperty = response.getHeaders().get(key);
                        Object output = ExampleBuilder.fromProperty(headerProperty, definitions);
                        if (output instanceof ArrayExample) {
                            output = ((ArrayExample) output).asString();
                        } else if (output instanceof ObjectExample) {
                            LOGGER.debug("not serializing output example, only primitives or arrays of"
                                    + " primitives are supported");
                        } else {
                            output = ((Example) output).asString();
                        }
                        builder.header(key, output);
                    }
                }
                Map<String, Object> examples = response.getExamples();
                if (examples != null) {
                    for (MediaType mediaType : requestContext.getAcceptableMediaTypes()) {
                        for (String key : examples.keySet()) {
                            if (MediaType.valueOf(key).isCompatible(mediaType)) {
                                builder.entity(examples.get(key)).type(mediaType);
                                return builder.build();
                            }
                        }
                    }
                }
                Object output = ExampleBuilder.fromProperty(response.getSchema(), definitions);
                if (output != null) {
                    ResponseContext resp = new ResponseContext().entity(output);
                    setContentType(requestContext, resp, operation);
                    builder.entity(output);
                    if (resp.getContentType() != null) {
                        // this comes from the operation itself
                        builder.type(resp.getContentType());
                    } else {
                        // get acceptable content types
                        List<EntityProcessor> processors = EntityProcessorFactory.getProcessors();
                        MediaType responseMediaType = null;
                        // take first compatible one
                        for (EntityProcessor processor : processors) {
                            if (responseMediaType != null) {
                                break;
                            }
                            for (MediaType mt : requestContext.getAcceptableMediaTypes()) {
                                LOGGER.debug("checking type " + mt.toString() + " against "
                                        + processor.getClass().getName());
                                if (processor.supports(mt)) {
                                    builder.type(mt);
                                    responseMediaType = mt;
                                    break;
                                }
                            }
                        }
                        if (responseMediaType == null) {
                            // no match based on Accept header, use first processor in list
                            for (EntityProcessor processor : processors) {
                                List<MediaType> supportedTypes = processor.getSupportedMediaTypes();
                                if (supportedTypes.size() > 0) {
                                    builder.type(supportedTypes.get(0));
                                    break;
                                }
                            }
                        }
                    }
                    builder.entity(output);
                }
                return builder.build();
            } else {
                LOGGER.debug("no response type to map to, assume 200");
                code = 200;
            }
            return Response.status(code).build();
        }
        return Response.ok().build();
    } finally {
        for (String key : inputStreams.keySet()) {
            File file = inputStreams.get(key);
            if (file != null) {
                LOGGER.debug("deleting file " + file.getPath());
                file.delete();
            }
        }
    }
}
From source file:com.act.lcms.db.analysis.StandardIonAnalysis.java
public static void main(String[] args) throws Exception {
    Options opts = new Options();
    for (Option.Builder b : OPTION_BUILDERS) {
        opts.addOption(b.build());
    }
    CommandLine cl = null;
    try {
        CommandLineParser parser = new DefaultParser();
        cl = parser.parse(opts, args);
    } catch (ParseException e) {
        System.err.format("Argument parsing failed: %s\n", e.getMessage());
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null,
                true);
        System.exit(1);
    }

    if (cl.hasOption("help")) {
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null,
                true);
        return;
    }

    File lcmsDir = new File(cl.getOptionValue(OPTION_DIRECTORY));
    if (!lcmsDir.isDirectory()) {
        System.err.format("File at %s is not a directory\n", lcmsDir.getAbsolutePath());
        HELP_FORMATTER.printHelp(LoadPlateCompositionIntoDB.class.getCanonicalName(), HELP_MESSAGE, opts, null,
                true);
        System.exit(1);
    }

    try (DB db = DB.openDBFromCLI(cl)) {
        ScanFile.insertOrUpdateScanFilesInDirectory(db, lcmsDir);
        StandardIonAnalysis analysis = new StandardIonAnalysis();
        HashMap<Integer, Plate> plateCache = new HashMap<>();
        String plateBarcode = cl.getOptionValue(OPTION_STANDARD_PLATE_BARCODE);
        String inputChemicals = cl.getOptionValue(OPTION_STANDARD_CHEMICAL);
        String medium = cl.getOptionValue(OPTION_MEDIUM);

        // If a standard chemical is specified, do standard LCMS ion selection analysis
        if (inputChemicals != null && !inputChemicals.equals("")) {
            String[] chemicals;
            if (!inputChemicals.contains(",")) {
                chemicals = new String[1];
                chemicals[0] = inputChemicals;
            } else {
                chemicals = inputChemicals.split(",");
            }

            String outAnalysis = cl.getOptionValue(OPTION_OUTPUT_PREFIX) + "." + CSV_FORMAT;
            String plottingDirectory = cl.getOptionValue(OPTION_PLOTTING_DIR);
            String[] headerStrings = { "Molecule", "Plate Bar Code", "LCMS Detection Results" };
            CSVPrinter printer = new CSVPrinter(new FileWriter(outAnalysis),
                    CSVFormat.DEFAULT.withHeader(headerStrings));

            for (String inputChemical : chemicals) {
                List<StandardWell> standardWells;
                Plate queryPlate = Plate.getPlateByBarcode(db,
                        cl.getOptionValue(OPTION_STANDARD_PLATE_BARCODE));
                if (plateBarcode != null && medium != null) {
                    standardWells = analysis.getStandardWellsForChemicalInSpecificPlateAndMedium(db,
                            inputChemical, queryPlate.getId(), medium);
                } else if (plateBarcode != null) {
                    standardWells = analysis.getStandardWellsForChemicalInSpecificPlate(db, inputChemical,
                            queryPlate.getId());
                } else {
                    standardWells = analysis.getStandardWellsForChemical(db, inputChemical);
                }

                if (standardWells.size() == 0) {
                    throw new RuntimeException("Found no LCMS wells for " + inputChemical);
                }

                // Sort in descending order of media, where MeOH and Water related media are promoted
                // to the top and anything derived from yeast media is demoted.
                Collections.sort(standardWells, new Comparator<StandardWell>() {
                    @Override
                    public int compare(StandardWell o1, StandardWell o2) {
                        if (StandardWell.doesMediaContainYeastExtract(o1.getMedia())
                                && !StandardWell.doesMediaContainYeastExtract(o2.getMedia())) {
                            return 1;
                        } else {
                            return 0;
                        }
                    }
                });

                Map<StandardWell, StandardIonResult> wellToIonRanking = StandardIonAnalysis
                        .getBestMetlinIonsForChemical(inputChemical, lcmsDir, db, standardWells,
                                plottingDirectory);

                if (wellToIonRanking.size() != standardWells.size()
                        && !cl.hasOption(OPTION_OVERRIDE_NO_SCAN_FILE_FOUND)) {
                    throw new Exception("Could not find a scan file associated with one of the standard wells");
                }

                for (StandardWell well : wellToIonRanking.keySet()) {
                    LinkedHashMap<String, XZ> snrResults = wellToIonRanking.get(well).getAnalysisResults();

                    String snrRankingResults = "";
                    int numResultsToShow = 0;

                    Plate plateForWellToAnalyze = Plate.getPlateById(db, well.getPlateId());

                    for (Map.Entry<String, XZ> ionToSnrAndTime : snrResults.entrySet()) {
                        if (numResultsToShow > 3) {
                            break;
                        }
                        String ion = ionToSnrAndTime.getKey();
                        XZ snrAndTime = ionToSnrAndTime.getValue();
                        snrRankingResults += String.format(ion + " (%.2f SNR at %.2fs); ",
                                snrAndTime.getIntensity(), snrAndTime.getTime());
                        numResultsToShow++;
                    }

                    String[] resultSet = { inputChemical,
                            plateForWellToAnalyze.getBarcode() + " " + well.getCoordinatesString() + " "
                                    + well.getMedia() + " " + well.getConcentration(),
                            snrRankingResults };

                    printer.printRecord(resultSet);
                }
            }

            try {
                printer.flush();
                printer.close();
            } catch (IOException e) {
                System.err.println("Error while flushing/closing csv writer.");
                e.printStackTrace();
            }
        } else {
            // Get the set of chemicals that includes the construct and all its intermediates
            Pair<ConstructEntry, List<ChemicalAssociatedWithPathway>> constructAndPathwayChems = analysis
                    .getChemicalsForConstruct(db, cl.getOptionValue(OPTION_CONSTRUCT));
            System.out.format("Construct: %s\n", constructAndPathwayChems.getLeft().getCompositionId());

            for (ChemicalAssociatedWithPathway pathwayChem : constructAndPathwayChems.getRight()) {
                System.out.format("  Pathway chem %s\n", pathwayChem.getChemical());

                // Get all the standard wells for the pathway chemicals. These wells contain only
                // the chemical added with controlled solutions (i.e. no organism or other chemicals
                // in the solution).
                List<StandardWell> standardWells;
                if (plateBarcode != null) {
                    Plate queryPlate = Plate.getPlateByBarcode(db,
                            cl.getOptionValue(OPTION_STANDARD_PLATE_BARCODE));
                    standardWells = analysis.getStandardWellsForChemicalInSpecificPlate(db,
                            pathwayChem.getChemical(), queryPlate.getId());
                } else {
                    standardWells = analysis.getStandardWellsForChemical(db, pathwayChem.getChemical());
                }

                for (StandardWell wellToAnalyze : standardWells) {
                    List<StandardWell> negativeControls = analysis.getViableNegativeControlsForStandardWell(db,
                            wellToAnalyze);
                    Map<StandardWell, List<ScanFile>> allViableScanFiles = analysis
                            .getViableScanFilesForStandardWells(db, wellToAnalyze, negativeControls);
                    List<String> primaryStandardScanFileNames = new ArrayList<>();
                    for (ScanFile scanFile : allViableScanFiles.get(wellToAnalyze)) {
                        primaryStandardScanFileNames.add(scanFile.getFilename());
                    }
                    Plate plate = plateCache.get(wellToAnalyze.getPlateId());
                    if (plate == null) {
                        plate = Plate.getPlateById(db, wellToAnalyze.getPlateId());
                        plateCache.put(plate.getId(), plate);
                    }
                    System.out.format("  Standard well: %s @ %s, '%s'%s%s\n", plate.getBarcode(),
                            wellToAnalyze.getCoordinatesString(), wellToAnalyze.getChemical(),
                            wellToAnalyze.getMedia() == null ? ""
                                    : String.format(" in %s", wellToAnalyze.getMedia()),
                            wellToAnalyze.getConcentration() == null ? ""
                                    : String.format(" @ %s", wellToAnalyze.getConcentration()));
                    System.out.format("    Scan files: %s\n",
                            StringUtils.join(primaryStandardScanFileNames, ", "));

                    for (StandardWell negCtrlWell : negativeControls) {
                        plate = plateCache.get(negCtrlWell.getPlateId());
                        if (plate == null) {
                            plate = Plate.getPlateById(db, negCtrlWell.getPlateId());
                            plateCache.put(plate.getId(), plate);
                        }
                        List<String> negativeControlScanFileNames = new ArrayList<>();
                        for (ScanFile scanFile : allViableScanFiles.get(negCtrlWell)) {
                            negativeControlScanFileNames.add(scanFile.getFilename());
                        }
                        System.out.format("    Viable negative: %s @ %s, '%s'%s%s\n", plate.getBarcode(),
                                negCtrlWell.getCoordinatesString(), negCtrlWell.getChemical(),
                                negCtrlWell.getMedia() == null ? ""
                                        : String.format(" in %s", negCtrlWell.getMedia()),
                                negCtrlWell.getConcentration() == null ? ""
                                        : String.format(" @ %s", negCtrlWell.getConcentration()));
                        System.out.format("      Scan files: %s\n",
                                StringUtils.join(negativeControlScanFileNames, ", "));
                        // TODO: do something useful with the standard wells and their scan files,
                        // and then stop all the printing.
                    }
                }
            }
        }
    }
}