Usage examples for java.nio.charset.Charset#name()
Method signature: String name()
To view the full source code of an example, follow the source link below it.
From source file:org.sonar.batch.issue.ignore.scanner.IssueExclusionsRegexpScanner.java
public void scan(String resource, File file, Charset sourcesEncoding) throws IOException { LOG.debug("Scanning {}", resource); init();//from w ww.j a va 2 s . c o m List<String> lines = FileUtils.readLines(file, sourcesEncoding.name()); int lineIndex = 0; for (String line : lines) { lineIndex++; if (line.trim().length() == 0) { continue; } // first check the single regexp patterns that can be used to totally exclude a file for (java.util.regex.Pattern pattern : allFilePatterns) { if (pattern.matcher(line).find()) { exclusionPatternInitializer.getPatternMatcher().addPatternToExcludeResource(resource); // nothing more to do on this file LOG.debug("- Exclusion pattern '{}': every violation in this file will be ignored.", pattern); return; } } // then check the double regexps if we're still here checkDoubleRegexps(line, lineIndex); } if (currentMatcher != null && !currentMatcher.hasSecondPattern()) { // this will happen when there is a start block regexp but no end block regexp endExclusion(lineIndex + 1); } // now create the new line-based pattern for this file if there are exclusions fileLength = lineIndex; if (!lineExclusions.isEmpty()) { Set<LineRange> lineRanges = convertLineExclusionsToLineRanges(); LOG.debug("- Line exclusions found: {}", lineRanges); exclusionPatternInitializer.getPatternMatcher().addPatternToExcludeLines(resource, lineRanges); } }
From source file:org.sonar.api.batch.AbstractSourceImporter.java
protected void parseDirs(SensorContext context, List<File> files, List<File> sourceDirs, boolean unitTest, Charset sourcesEncoding) { for (File file : files) { Resource resource = createResource(file, sourceDirs, unitTest); if (resource != null) { try { String source = FileUtils.readFileToString(file, sourcesEncoding.name()); context.index(resource); context.saveSource(resource, source); } catch (IOException e) { throw new SonarException("Unable to read and import the source file : '" + file.getAbsolutePath() + "' with the charset : '" + sourcesEncoding.name() + "'.", e); }//from www .j a v a2 s. co m } } }
From source file:de.drv.dsrv.spoc.web.service.impl.SpocResponseHandler.java
@Override public StreamSource handleResponse(final HttpResponse httpResponse) throws IOException { final HttpEntity httpResponseEntity = getAndCheckHttpEntity(httpResponse); // Kopiert den InputStream der Response, damit die Connection // geschlossen werden kann. final ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); IOUtils.copy(httpResponseEntity.getContent(), outputStream); EntityUtils.consume(httpResponseEntity); final ByteArrayInputStream inputStream = new ByteArrayInputStream(outputStream.toByteArray()); // Initialisiert die zurueck gegebene Source. StreamSource responseSource;//w w w.j a va2 s. c o m final Charset charset = getCharsetFromResponse(httpResponseEntity); if (charset != null) { if (LOG.isDebugEnabled()) { LOG.debug("F\u00fcr die Antwort des Fachverfahrens wird das Charset >" + charset.name() + "< aus dem Content-Type Header verwendet."); } responseSource = new StreamSource(new InputStreamReader(inputStream, charset)); } else { if (LOG.isDebugEnabled()) { LOG.debug("Die Antwort des Fachverfahren hat kein Charset im Content-Type Header spezifiziert." + " Der Payload wird dem XML-Parser ohne Charset \u00fcbergeben," + " dieses sollte aber im XML-Prolog spezifiziert sein."); } responseSource = new StreamSource(inputStream); } return responseSource; }
From source file:org.apache.tika.parser.txt.TXTParser.java
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { // Automatically detect the character encoding try (AutoDetectReader reader = new AutoDetectReader(new CloseShieldInputStream(stream), metadata, getEncodingDetector(context))) { //try to get detected content type; could be a subclass of text/plain //such as vcal, etc. String incomingMime = metadata.get(Metadata.CONTENT_TYPE); MediaType mediaType = MediaType.TEXT_PLAIN; if (incomingMime != null) { MediaType tmpMediaType = MediaType.parse(incomingMime); if (tmpMediaType != null) { mediaType = tmpMediaType; }/*ww w . j a v a2s. c om*/ } Charset charset = reader.getCharset(); MediaType type = new MediaType(mediaType, charset); metadata.set(Metadata.CONTENT_TYPE, type.toString()); // deprecated, see TIKA-431 metadata.set(Metadata.CONTENT_ENCODING, charset.name()); XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata); xhtml.startDocument(); xhtml.startElement("p"); char[] buffer = new char[4096]; int n = reader.read(buffer); while (n != -1) { xhtml.characters(buffer, 0, n); n = reader.read(buffer); } xhtml.endElement("p"); xhtml.endDocument(); } }
From source file:net.sf.jsog.spring.StringJsogHttpMessageConverter.java
@Override public JSOG read(Class<? extends JSOG> clazz, HttpInputMessage input) throws IOException, HttpMessageNotReadableException { HttpHeaders headers = input.getHeaders(); MediaType contentType = headers.getContentType(); Charset encoding = contentType.getCharSet(); if (encoding == null) { encoding = this.encoding; }/*from www . jav a 2s . co m*/ // Read in the JSON String json = IOUtils.toString(input.getBody(), encoding.name()); // Parse the JSON and return a JSOG. try { return JSOG.parse(json); } catch (IOException e) { throw new HttpMessageNotReadableException("Unable to parse JSON.", e); } }
From source file:org.apache.tika.parser.isatab.ISATabAssayParser.java
@Override public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { // Automatically detect the character encoding AutoDetectReader reader = new AutoDetectReader(new CloseShieldInputStream(stream), metadata, context.get(ServiceLoader.class, LOADER)); CSVParser csvParser = null;// w w w . ja v a 2s . c o m try { Charset charset = reader.getCharset(); MediaType type = new MediaType(MediaType.application("x-isatab-assay"), charset); metadata.set(Metadata.CONTENT_TYPE, type.toString()); metadata.set(Metadata.CONTENT_ENCODING, charset.name()); XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata); xhtml.newline(); csvParser = new CSVParser(reader, CSVFormat.TDF); xhtml.startDocument(); xhtml.newline(); xhtml.startElement("table"); xhtml.newline(); List<CSVRecord> records = csvParser.getRecords(); for (int i = 0; i < records.get(0).size(); i++) { xhtml.startElement("th"); xhtml.characters(records.get(0).get(i)); xhtml.endElement("th"); xhtml.newline(); } for (int i = 1; i < records.size(); i++) { xhtml.startElement("tr"); xhtml.newline(); for (int j = 0; j < records.get(i).size(); j++) { xhtml.startElement("td"); xhtml.characters(records.get(i).get(j)); xhtml.endElement("td"); xhtml.newline(); } xhtml.endElement("tr"); xhtml.newline(); } xhtml.endElement("table"); xhtml.newline(); xhtml.endDocument(); } finally { reader.close(); csvParser.close(); } }
From source file:org.apache.tika.parser.isatab.ISATabStudyParser.java
@Override public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { // Automatically detect the character encoding AutoDetectReader reader = new AutoDetectReader(new CloseShieldInputStream(stream), metadata, context.get(ServiceLoader.class, LOADER)); CSVParser csvParser = null;//from w w w . ja v a 2 s .c o m try { Charset charset = reader.getCharset(); MediaType type = new MediaType(MediaType.application("x-isatab-study"), charset); metadata.set(Metadata.CONTENT_TYPE, type.toString()); metadata.set(Metadata.CONTENT_ENCODING, charset.name()); XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata); xhtml.newline(); csvParser = new CSVParser(reader, CSVFormat.TDF); xhtml.startDocument(); xhtml.newline(); xhtml.startElement("table"); xhtml.newline(); List<CSVRecord> records = csvParser.getRecords(); for (int i = 0; i < records.get(0).size(); i++) { xhtml.startElement("th"); xhtml.characters(records.get(0).get(i)); xhtml.endElement("th"); xhtml.newline(); } for (int i = 1; i < records.get(0).size(); i++) { xhtml.startElement("tr"); xhtml.newline(); for (int j = 0; j < records.get(i).size(); j++) { xhtml.startElement("td"); xhtml.characters(records.get(i).get(j)); xhtml.endElement("td"); xhtml.newline(); } xhtml.endElement("tr"); xhtml.newline(); } xhtml.endElement("table"); xhtml.newline(); xhtml.endDocument(); } finally { reader.close(); csvParser.close(); } }
From source file:org.sonar.plugins.xml.parsers.LineCountParser.java
/**
 * Reads the file, records which physical lines are blank, and derives the set
 * of lines of code (lines that are neither blank nor comment lines).
 * Updates the {@code linesNumber} and {@code linesOfCodeLines} fields.
 *
 * @param file     the XML file to analyse
 * @param encoding charset used to decode the file
 * @throws IOException if the file cannot be read
 */
private void processBlankLines(File file, Charset encoding) throws IOException {
    // Accept \r\n, \n and bare \r as line terminators.
    String lineSeparatorRegexp = "(?:\r)?\n|\r";
    String fileContent = FileUtils.readFileToString(file, encoding.name());

    Set<Integer> blankLines = new HashSet<>();
    int lineCount = 0;
    for (String line : fileContent.split(lineSeparatorRegexp, -1)) {
        lineCount++;
        if (StringUtils.isBlank(line)) {
            blankLines.add(lineCount);
        }
    }
    linesNumber = lineCount;

    // Lines of code = all lines minus blank lines minus comment lines.
    linesOfCodeLines = new HashSet<>();
    for (int line = 1; line <= linesNumber; line++) {
        if (!blankLines.contains(line) && !commentHandler.commentLines.contains(line)) {
            linesOfCodeLines.add(line);
        }
    }
}
From source file:com.couchbase.http.mime.MultipartEntity.java
protected String generateContentType(final String boundary, final Charset charset) { StringBuilder buffer = new StringBuilder(); buffer.append("multipart/related; boundary="); buffer.append(boundary);//from ww w . j a v a 2s . co m if (charset != null) { buffer.append("; charset="); buffer.append(charset.name()); } return buffer.toString(); }