List of usage examples for the org.apache.commons.io.input.CharSequenceReader constructor
public CharSequenceReader(CharSequence charSequence)
From source file:de.fhg.iais.asc.transformer.jdom.splitter.SimpleSplitterTestBase.java
@Override
protected Document createTestDoc() throws JDOMException, IOException {
    // Assemble the test XML: prefix, then REPEAT_COUNT copies of the repeated
    // fragment, then the suffix.
    StringBuilder xml = new StringBuilder(this.xmlContent.getPrefix());
    for (int count = 0; count < this.REPEAT_COUNT; count++) {
        xml.append(this.xmlContent.getRepeat());
    }
    xml.append(this.xmlContent.getSuffix());
    // CharSequenceReader lets SAXBuilder parse the buffer without an
    // intermediate String copy.
    return new SAXBuilder().build(new CharSequenceReader(xml));
}
From source file:com.ocs.dynamo.importer.impl.BaseFixedLengthImporter.java
/** * Reads a byte array into a CSV file// w w w . j av a 2 s . c om * * @param bytes * @return * @throws IOException */ protected List<String[]> readFixedLengthFile(byte[] bytes, List<Integer> fieldLengths) { try (BufferedReader reader = new BufferedReader( new CharSequenceReader(new String(bytes, DynamoConstants.UTF_8)))) { List<String[]> result = new ArrayList<String[]>(); String line = reader.readLine(); while (line != null) { List<String> temp = new ArrayList<>(); int start = 0; for (Integer len : fieldLengths) { if (start + len <= line.length()) { // there is space String field = line.substring(start, start + len); temp.add(field.trim()); } else if (start <= line.length()) { String field = line.substring(start, line.length()); temp.add(field.trim()); } start += len; } result.add(temp.toArray(new String[0])); line = reader.readLine(); } return result; } catch (IOException ex) { throw new OCSImportException(ex.getMessage(), ex); } }
From source file:com.cloudbees.jenkins.support.filter.FilteredWriterTest.java
@Issue("JENKINS-21670") @Test//from w w w . ja va 2 s. c o m public void shouldSupportLinesLongerThanDefaultBufferSize() throws Exception { CharBuffer input = CharBuffer.allocate(FilteredConstants.DEFAULT_DECODER_CAPACITY * 10); for (int i = 0; i < input.capacity(); i++) { input.put('+'); } input.flip(); CharSequenceReader reader = new CharSequenceReader(input); ContentFilter filter = s -> s.replace('+', '-'); StringWriter output = new StringWriter(); FilteredWriter writer = new FilteredWriter(output, filter); IOUtils.copy(reader, writer); assertThat(output.toString()).isEmpty(); writer.flush(); assertThat(output.toString()).isNotEmpty().matches("^-+$"); }
From source file:de.fhg.iais.asc.transformer.jdom.splitter.AdvancedSplitterTestBase.java
@Override protected Document createTestDoc() throws JDOMException, IOException { StringBuilder sb = new StringBuilder(this.xmlContent.getContent()); SAXBuilder saxBuilder = new SAXBuilder(); try {// w w w . j a va 2 s. co m return saxBuilder.build(new CharSequenceReader(sb)); } catch (RuntimeException e) { throw e; } catch (Exception e) { throw new RuntimeException(e); } }
From source file:ch.sentric.hbase.prospective.Percolator.java
/**
 * Tries to find a set of queries that match the given document. The document
 * is indexed into an in-memory Lucene index, then every candidate query is
 * executed against that single-document index.
 *
 * @param doc the Lucene document
 * @param queries candidate queries keyed by an application-specific id; may be null or empty
 * @return the matching queries
 * @throws IOException if an I/O error occurs
 */
public Response<T> percolate(final Document doc, final Map<T, Query> queries) throws IOException {
    // first, parse the source doc into a MemoryIndex
    final MemoryIndex memoryIndex = new MemoryIndex();
    for (final Fieldable field : doc.getFields()) {
        // non-indexed fields cannot be matched by queries, skip them
        if (!field.isIndexed()) {
            continue;
        }
        final TokenStream tokenStream = field.tokenStreamValue();
        if (tokenStream != null) {
            // the field carries a pre-built token stream: use it as-is
            memoryIndex.addField(field.name(), tokenStream, field.getBoost());
        } else {
            // otherwise fall back to analyzing the reader value, then the string value
            final Reader reader = field.readerValue();
            if (reader != null) {
                memoryIndex.addField(field.name(), analyzer.reusableTokenStream(field.name(), reader),
                        field.getBoost());
            } else {
                final String value = field.stringValue();
                if (value != null) {
                    memoryIndex.addField(field.name(),
                            analyzer.reusableTokenStream(field.name(), new CharSequenceReader(value)),
                            field.getBoost());
                }
                // fields with no token stream, reader, or string value are ignored
            }
        }
    }
    // do the search: run each candidate query and collect those with at least one hit
    final IndexSearcher searcher = memoryIndex.createSearcher();
    final Map<T, Query> matches = new HashMap<T, Query>(0);
    if (queries != null && !queries.isEmpty()) {
        final ExistsCollector collector = new ExistsCollector();
        for (final Map.Entry<T, Query> entry : queries.entrySet()) {
            collector.reset();
            searcher.search(entry.getValue(), collector);
            if (collector.exists()) {
                matches.put(entry.getKey(), entry.getValue());
            }
        }
    }
    return new Response<T>(matches);
}
From source file:hudson.plugins.clearcase.ucm.model.ActivitiesTest.java
@Test
public void parseTwoActivitiesOneLeftOneRight() throws IOException {
    // One "<<" (left) and one ">>" (right) activity line, newline-separated.
    String lines = Strings.join("<< act1@\\pvob \"Activity 1\"", ">> act2@\\pvob \"Activity 2\"").with("\n");
    ActivitiesDelta activities = ActivitiesDelta.parse(new CharSequenceReader(lines));

    assertThat(activities.getLeft()).hasSize(1);
    assertThat(activities.getRight()).hasSize(1);

    Activity left = activities.getLeft().iterator().next();
    assertThat(left.getSelector()).isEqualTo("activity:act1@\\pvob");
    assertThat(left.getHeadline()).isEqualTo("Activity 1");

    Activity right = activities.getRight().iterator().next();
    assertThat(right.getSelector()).isEqualTo("activity:act2@\\pvob");
    assertThat(right.getHeadline()).isEqualTo("Activity 2");
}
From source file:lux.search.highlight.XmlHighlighter.java
public XdmNode highlight(Query query, NodeInfo node) throws XMLStreamException, SaxonApiException { if (needsPositions(query)) { // A partial workaround for highlighting element text queries with phrases query = replaceFields(query, textFieldName); }/*from ww w . ja v a 2 s . c o m*/ scorer = new QueryScorer(query); // grab all the text at once so Lucene's lame-ass highlighter can figure out if there are any // phrases in it... // TODO: is this the Analyzer we're looking for??? OR ... reimplement using different HL Analyzer defaultAnalyzer = new DefaultAnalyzer(); TokenStream textTokens = null; try { textTokens = defaultAnalyzer.tokenStream("xml_text", new CharSequenceReader("")); } catch (IOException e) { } init(new XmlTextTokenStream("xml_text", defaultAnalyzer, textTokens, new XdmNode(node), null, processor)); XmlReader xmlReader = new XmlReader(); xmlReader.addHandler(this); xmlReader.read(node); // setBaseURI (URI.create(node.getBaseURI())); if (getDocument().getUnderlyingNode() instanceof TinyDocumentImpl) { ((TinyDocumentImpl) getDocument().getUnderlyingNode()).setBaseURI(node.getSystemId()); } return getDocument(); }
From source file:com.napkindrawing.dbversion.task.DbVersionUpgrade.java
/**
 * Compiles the upgrade script template of the given revision for the given
 * profile, caching the result per profile and revision version.
 */
protected String getCompiledTemplate(Profile profile, Revision revision) {
    // Serve from the cache when this profile/version pair was compiled before.
    if (compiledTemplates.containsKey(profile)
            && compiledTemplates.get(profile).containsKey(revision.getVersion())) {
        return compiledTemplates.get(profile).get(revision.getVersion());
    }

    // Run the upgrade script template through FreeMarker with the shared model.
    CharSequenceReader templateReader = new CharSequenceReader(revision.getUpgradeScriptTemplate());
    String compiled;
    try {
        Template fmTemplate = new Template(revision.getName(), templateReader, _fmConfig);
        StringWriter templateWriter = new StringWriter();
        fmTemplate.process(parsedTemplateData, templateWriter);
        compiled = templateWriter.toString();
    } catch (Exception e) {
        throw new BuildException(e, getLocation());
    }

    // Store the result in the per-profile cache before returning it.
    if (!compiledTemplates.containsKey(profile)) {
        compiledTemplates.put(profile, new HashMap<Version, String>());
    }
    compiledTemplates.get(profile).put(revision.getVersion(), compiled);
    return compiled;
}
From source file:com.napkindrawing.dbversion.task.DbVersionUpgrade.java
public void applyRevision(Profile profile, Revision revision) { log("Applying revision " + revision.getVersion()); log("Upgrade Script Template:\n\n" + revision.getUpgradeScriptTemplate() + "\n\n", Project.MSG_DEBUG); String compiledTemplate = getCompiledTemplate(profile, revision); log("Upgrade Script Compiled:\n\n" + compiledTemplate + "\n\n", Project.MSG_DEBUG); try {//w w w. java2 s .c o m if (getConnection() == null) { throw new BuildException("Couldn't connect to database", getLocation()); } runStatements(new CharSequenceReader(compiledTemplate), System.out); if (!isAutocommit()) { log("Committing transaction", Project.MSG_DEBUG); getConnection().commit(); } } catch (Exception e) { throw new BuildException(e, getLocation()); } InstalledRevision installedRevision = new InstalledRevision(profile, revision); installedRevision.setUpgradeScriptCompiled(compiledTemplate); installedRevision.assignUpgradeScriptCompiledChecksum(); installedRevision.setUpgradeScriptData(getParsedTemplateData()); logRevision(installedRevision); }
From source file:org.apache.any23.mime.TikaMIMETypeDetector.java
/**
 * Checks whether the stream content can be parsed as the given RDF format.
 * A data sample is extracted from the stream and fed to a strict Rio parser;
 * the content is considered a match when parsing succeeds.
 *
 * @param format the RDF format to verify the sample against
 * @param is input stream to be verified
 * @param insideBlockChars characters delimiting the inside of a block during sample extraction
 * @param lineCommentChars characters starting a line comment during sample extraction
 * @param outsideBlockChars characters delimiting the outside of a block during sample extraction
 * @param switchBlockChars characters toggling block state during sample extraction
 * @return <code>true</code> if the sample parses as <code>format</code>, <code>false</code> otherwise
 * @throws IOException if reading the sample from the stream fails
 */
public static boolean checkByRioFormat(RDFFormat format, InputStream is, char[] insideBlockChars,
        char[] lineCommentChars, char[] outsideBlockChars, char[] switchBlockChars) throws IOException {
    StringBuilder sample = extractDataSample(is, '.', insideBlockChars, lineCommentChars, outsideBlockChars,
            switchBlockChars);
    // Configure a strict parser: any violation in the sample means "not this format".
    RDFParser parser = Rio.createParser(format);
    parser.setDatatypeHandling(RDFParser.DatatypeHandling.VERIFY);
    parser.setStopAtFirstError(true);
    parser.setVerifyData(true);
    parser.setRDFHandler(new StatementCollector());
    Reader sampleReader = new CharSequenceReader(sample);
    try {
        parser.parse(sampleReader, "");
        return true;
    } catch (Exception e) {
        // A parse failure is the negative detection result, not an error condition.
        return false;
    }
}