List of usage examples for the org.apache.commons.io.input.ReaderInputStream constructor:
public ReaderInputStream(Reader reader, String charsetName)
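Before the project examples, here is a minimal, self-contained sketch of the constructor itself (the class name and sample text are ours, not taken from any project below). ReaderInputStream pulls characters from the Reader on demand and encodes them with the named charset, so the data is never copied up front:

import java.io.InputStream;
import java.io.StringReader;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.ReaderInputStream;

public class ReaderInputStreamDemo {
    public static void main(String[] args) throws Exception {
        // Adapt a character Reader into a byte InputStream, encoding lazily as UTF-8.
        try (InputStream in = new ReaderInputStream(new StringReader("hello, world"), "UTF-8")) {
            byte[] bytes = IOUtils.toByteArray(in);
            System.out.println(bytes.length); // 12
        }
    }
}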
From source file:com.github.podd.example.ExamplePoddClient.java
public Map<Path, String> uploadToStorage(final List<Path> bagsToUpload, final String sshServerFingerprint,
        final String sshHost, final int portNo, final String username, final Path pathToPublicKey,
        final Path localRootPath, final Path remoteRootPath, final PasswordFinder keyExtractor)
        throws PoddClientException, NoSuchAlgorithmException, IOException {
    final Map<Path, String> results = new ConcurrentHashMap<>();
    final ConcurrentMap<Path, ConcurrentMap<PoddDigestUtils.Algorithm, String>> digests = PoddDigestUtils
            .getDigests(bagsToUpload);
    try (SSHClient sshClient = new SSHClient(ExamplePoddClient.DEFAULT_CONFIG)) {
        sshClient.useCompression();
        sshClient.addHostKeyVerifier(sshServerFingerprint);
        sshClient.connect(sshHost, portNo);
        if (!Files.exists(pathToPublicKey)) {
            throw new PoddClientException("Could not find public key: " + pathToPublicKey);
        }
        if (!SecurityUtils.isBouncyCastleRegistered()) {
            throw new PoddClientException("Bouncy castle needed");
        }
        final FileKeyProvider rsa = new PKCS8KeyFile();
        rsa.init(pathToPublicKey.toFile(), keyExtractor);
        sshClient.authPublickey(username, rsa);
        // Session session = sshClient.startSession();
        try (SFTPClient sftp = sshClient.newSFTPClient()) {
            for (final Path nextBag : bagsToUpload) {
                // Check to make sure that the bag was under the local root path
                final Path localPath = nextBag.toAbsolutePath();
                if (!localPath.startsWith(localRootPath)) {
                    this.log.error("Local bag path was not a direct descendant of the local root path: {} {} {}",
                            localRootPath, nextBag, localPath);
                    throw new PoddClientException(
                            "Local bag path was not a direct descendant of the local root path: " + localPath
                                    + " " + localRootPath);
                }
                // Take the local root path out to get the subpath to use on the remote
                final Path remoteSubPath = localPath.subpath(localRootPath.getNameCount(),
                        nextBag.getNameCount() - 1);
                this.log.info("Remote sub path: {}", remoteSubPath);
                final Path remoteDirPath = remoteRootPath.resolve(remoteSubPath);
                this.log.info("Remote dir path: {}", remoteDirPath);
                final Path remoteBagPath = remoteDirPath.resolve(nextBag.getFileName());
                this.log.info("Remote bag path: {}", remoteBagPath);
                boolean fileFound = false;
                boolean sizeCorrect = false;
                try {
                    // check details of a remote bag
                    final FileAttributes attribs = sftp.lstat(remoteBagPath.toAbsolutePath().toString());
                    final long localSize = Files.size(nextBag);
                    final long remoteSize = attribs.getSize();
                    if (localSize <= 0) {
                        this.log.error("Local bag was empty: {}", nextBag);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (remoteSize <= 0) {
                        this.log.warn("Remote bag was empty: {} {}", nextBag, attribs);
                        sizeCorrect = false;
                        fileFound = false;
                    } else if (localSize == remoteSize) {
                        this.log.info("Found file on remote already with same size as local: {} {}", nextBag,
                                remoteBagPath);
                        sizeCorrect = true;
                        fileFound = true;
                    } else {
                        sizeCorrect = false;
                        fileFound = true;
                        // We always assume that a non-zero local file is correct
                        // The bags contain time-stamps that will be modified when they are
                        // regenerated, likely changing the file-size, and hopefully changing
                        // the digest checksums
                        // throw new PoddClientException(
                        //         "Could not automatically compare file sizes (need manual intervention to delete one) : "
                        //         + nextBag + " " + remoteBagPath + " localSize=" + localSize
                        //         + " remoteSize=" + remoteSize);
                    }
                } catch (final IOException e) {
                    // lstat() throws an IOException if the file does not exist
                    // Ignore
                    sizeCorrect = false;
                    fileFound = false;
                }
                final ConcurrentMap<Algorithm, String> bagDigests = digests.get(nextBag);
                if (bagDigests.isEmpty()) {
                    this.log.error("No bag digests were generated for bag: {}", nextBag);
                }
                for (final Entry<Algorithm, String> entry : bagDigests.entrySet()) {
                    final Path localDigestPath = localPath
                            .resolveSibling(localPath.getFileName() + entry.getKey().getExtension());
                    // Create the local digest file: ReaderInputStream turns the in-memory digest
                    // String into a byte stream that Files.copy can write directly to disk
                    Files.copy(
                            new ReaderInputStream(new StringReader(entry.getValue()), StandardCharsets.UTF_8),
                            localDigestPath);
                    final Path remoteDigestPath = remoteBagPath
                            .resolveSibling(remoteBagPath.getFileName() + entry.getKey().getExtension());
                    boolean nextDigestFileFound = false;
                    boolean nextDigestCorrect = false;
                    try {
                        final Path tempFile = Files.createTempFile("podd-digest-",
                                entry.getKey().getExtension());
                        final SFTPFileTransfer sftpFileTransfer = new SFTPFileTransfer(sftp.getSFTPEngine());
                        // Note: this downloads remoteBagPath, yet the lines below are compared
                        // against the digest value; remoteDigestPath may have been intended here
                        sftpFileTransfer.download(remoteBagPath.toAbsolutePath().toString(),
                                tempFile.toAbsolutePath().toString());
                        nextDigestFileFound = true;
                        final List<String> allLines = Files.readAllLines(tempFile, StandardCharsets.UTF_8);
                        if (allLines.isEmpty()) {
                            nextDigestCorrect = false;
                        } else if (allLines.size() > 1) {
                            nextDigestCorrect = false;
                        }
                        // Check if the digests match exactly
                        else if (allLines.get(0).equals(entry.getValue())) {
                            nextDigestCorrect = true;
                        } else {
                            nextDigestCorrect = false;
                        }
                    } catch (final IOException e) {
                        nextDigestFileFound = false;
                        nextDigestCorrect = false;
                    }
                    if (nextDigestFileFound && nextDigestCorrect) {
                        this.log.info(
                                "Not copying digest to remote as it exists and contains the same content as the local digest");
                    } else if (nextDigestFileFound && !nextDigestCorrect) {
                        this.log.error("Found remote digest but content was not correct: {} {}",
                                localDigestPath, remoteDigestPath);
                        sftp.rm(remoteDigestPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    } else if (!nextDigestFileFound) {
                        this.log.info("About to make directories on remote: {}", remoteDirPath);
                        sftp.mkdirs(remoteDirPath.toString());
                        this.log.info("Copying digest to remote: {}", remoteDigestPath);
                        sftp.put(new FileSystemFile(localDigestPath.toString()), remoteDigestPath.toString());
                    }
                }
                if (fileFound && sizeCorrect) {
                    this.log.info("Not copying bag to remote as it exists and is the same size as local bag");
                } else if (fileFound && !sizeCorrect) {
                    this.log.error("Found remote bag but size was not correct: {} {}", nextBag, remoteBagPath);
                    sftp.rm(remoteBagPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                } else if (!fileFound) {
                    this.log.info("About to make directories on remote: {}", remoteDirPath);
                    sftp.mkdirs(remoteDirPath.toString());
                    this.log.info("Copying bag to remote: {}", remoteBagPath);
                    sftp.put(new FileSystemFile(localPath.toString()), remoteBagPath.toString());
                }
            }
        }
    } catch (final IOException e) {
        throw new PoddClientException("Could not copy a bag to the remote location", e);
    }
    return results;
}
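The ReaderInputStream call buried in the example above is a compact way to write an in-memory String to a file without building an intermediate byte array. Distilled into a standalone sketch (the digest value and file name are hypothetical):

import java.io.StringReader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import org.apache.commons.io.input.ReaderInputStream;

public class DigestFileWriter {
    public static void main(String[] args) throws Exception {
        String digest = "d41d8cd98f00b204e9800998ecf8427e"; // sample digest value
        Path target = Paths.get("bag.zip.md5"); // hypothetical digest file name
        // Files.copy drains the InputStream and writes the encoded bytes to the target path
        // (it fails if the target already exists, which suits a create-once digest file).
        Files.copy(new ReaderInputStream(new StringReader(digest), StandardCharsets.UTF_8), target);
    }
}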
From source file:org.apache.axis2.format.TextFromElementDataSource.java
public InputStream getInputStream() throws IOException {
    return new ReaderInputStream(ElementHelper.getTextAsStream(element, true), charset);
}
From source file:org.apache.axis2.format.TextMessageBuilderAdapter.java
public OMElement processDocument(Reader reader, String contentType, MessageContext messageContext)
        throws AxisFault {
    String charset;
    try {
        ContentType ct = new ContentType(contentType);
        charset = ct.getParameter("charset");
    } catch (ParseException ex) {
        charset = null;
    }
    if (charset == null) {
        charset = MessageContext.DEFAULT_CHAR_SET_ENCODING;
    }
    messageContext.setProperty(Constants.Configuration.CHARACTER_SET_ENCODING, charset);
    return processDocument(new ReaderInputStream(reader, charset), contentType, messageContext);
}
From source file:org.apache.hadoop.gateway.filter.rewrite.impl.javascript.JavaScriptUrlRewriteStreamFilter.java
@Override
public InputStream filter(InputStream stream, String encoding, UrlRewriter rewriter, Resolver resolver,
        UrlRewriter.Direction direction, UrlRewriteFilterContentDescriptor config) throws IOException {
    if (config != null) {
        return new ReaderInputStream(new JavaScriptUrlRewriteFilterReader(
                new InputStreamReader(stream, encoding), rewriter, resolver, direction, config), encoding);
    } else {
        return stream;
    }
}
From source file:org.apache.manifoldcf.agents.output.amazoncloudsearch.AmazonCloudSearchConnector.java
/** Test the connection. Returns a string describing the connection integrity.
 *@return the connection's status as a displayable string.
 */
@Override
public String check() throws ManifoldCFException {
    try {
        getSession();
        // Post an empty document batch ("[]"). The service is expected to reject it,
        // so an "error" status below is treated as proof that the endpoint was reached.
        String responsbody = postData(new ReaderInputStream(new StringReader("[]"), Consts.UTF_8));
        String status = "";
        try {
            status = getStatusFromJsonResponse(responsbody);
        } catch (ManifoldCFException e) {
            Logging.ingest.debug(e);
            return "Could not get status from response body. Check Access Policy setting of your domain of Amazon CloudSearch.: "
                    + e.getMessage();
        }
        if ("error".equalsIgnoreCase(status)) {
            return "Connection working. responsbody : " + responsbody;
        }
        return "Connection NOT working. responsbody : " + responsbody;
    } catch (ServiceInterruption e) {
        Logging.ingest.debug(e);
        return "Transient exception: " + e.getMessage();
    }
}
From source file:org.apache.manifoldcf.agents.output.amazoncloudsearch.AmazonCloudSearchConnector.java
/** Add (or replace) a document in the output data store using the connector.
 * This method presumes that the connector object has been configured, and it is thus able to communicate with the output data store should that be
 * necessary.
 * The OutputSpecification is *not* provided to this method, because the goal is consistency, and if output is done it must be consistent with the
 * output description, since that was what was partly used to determine if output should be taking place. So it may be necessary for this method to decode
 * an output description string in order to determine what should be done.
 *@param documentURI is the URI of the document. The URI is presumed to be the unique identifier which the output data store will use to process
 * and serve the document. This URI is constructed by the repository connector which fetches the document, and is thus universal across all output connectors.
 *@param outputDescription is the description string that was constructed for this document by the getOutputDescription() method.
 *@param document is the document data to be processed (handed to the output data store).
 *@param authorityNameString is the name of the authority responsible for authorizing any access tokens passed in with the repository document. May be null.
 *@param activities is the handle to an object that the implementer of an output connector may use to perform operations, such as logging processing activity.
 *@return the document status (accepted or permanently rejected).
 */
@Override
public int addOrReplaceDocumentWithException(String documentURI, VersionContext outputDescription,
        RepositoryDocument document, String authorityNameString, IOutputAddActivity activities)
        throws ManifoldCFException, ServiceInterruption, IOException {
    // Establish a session
    getSession();
    String uid = ManifoldCF.hash(documentURI);
    // Build a JSON generator
    JSONObjectReader objectReader = new JSONObjectReader();
    // Build the metadata field part
    JSONObjectReader fieldReader = new JSONObjectReader();
    // Add the type and ID
    objectReader
            .addNameValuePair(new JSONNameValueReader(new JSONStringReader("id"), new JSONStringReader(uid)))
            .addNameValuePair(
                    new JSONNameValueReader(new JSONStringReader("type"), new JSONStringReader("add")))
            .addNameValuePair(new JSONNameValueReader(new JSONStringReader("fields"), fieldReader));
    // Populate the fields...
    Iterator<String> itr = document.getFields();
    while (itr.hasNext()) {
        String fieldName = itr.next();
        Object[] fieldValues = document.getField(fieldName);
        JSONReader[] elements = new JSONReader[fieldValues.length];
        if (fieldValues instanceof Reader[]) {
            for (int i = 0; i < elements.length; i++) {
                elements[i] = new JSONStringReader((Reader) fieldValues[i]);
            }
        } else if (fieldValues instanceof Date[]) {
            for (int i = 0; i < elements.length; i++) {
                elements[i] = new JSONStringReader(DateParser.formatISO8601Date((Date) fieldValues[i]));
            }
        } else if (fieldValues instanceof String[]) {
            for (int i = 0; i < elements.length; i++) {
                elements[i] = new JSONStringReader((String) fieldValues[i]);
            }
        } else {
            throw new IllegalStateException("Unexpected metadata type: " + fieldValues.getClass().getName());
        }
        fieldReader.addNameValuePair(
                new JSONNameValueReader(new JSONStringReader(fieldName), new JSONArrayReader(elements)));
    }
    // Add in the original URI
    fieldReader.addNameValuePair(new JSONNameValueReader(new JSONStringReader(DOCUMENT_URI_FIELDNAME),
            new JSONStringReader(documentURI)));
    // Add the primary content data in.
    fieldReader.addNameValuePair(new JSONNameValueReader(new JSONStringReader(FILE_BODY_TEXT_FIELDNAME),
            new JSONStringReader(new InputStreamReader(document.getBinaryStream(), Consts.UTF_8))));
    documentChunkManager.recordDocument(uid, serverHost, serverPath, documentURI, INGEST_ACTIVITY,
            new Long(document.getBinaryLength()), new ReaderInputStream(objectReader, Consts.UTF_8));
    conditionallyFlushDocuments(activities);
    return DOCUMENTSTATUS_ACCEPTED;
}
From source file:org.apache.manifoldcf.agents.output.amazoncloudsearch.AmazonCloudSearchConnector.java
/** Remove a document using the connector.
 * Note that the last outputDescription is included, since it may be necessary for the connector to use such information to know how to properly remove the document.
 *@param documentURI is the URI of the document. The URI is presumed to be the unique identifier which the output data store will use to process
 * and serve the document. This URI is constructed by the repository connector which fetches the document, and is thus universal across all output connectors.
 *@param outputDescription is the last description string that was constructed for this document by the getOutputDescription() method above.
 *@param activities is the handle to an object that the implementer of an output connector may use to perform operations, such as logging processing activity.
 */
@Override
public void removeDocument(String documentURI, String outputDescription, IOutputRemoveActivity activities)
        throws ManifoldCFException, ServiceInterruption {
    // Establish a session
    getSession();
    String uid = ManifoldCF.hash(documentURI);
    // Build a JSON generator
    JSONObjectReader objectReader = new JSONObjectReader();
    // Add the type and ID
    objectReader
            .addNameValuePair(new JSONNameValueReader(new JSONStringReader("id"), new JSONStringReader(uid)))
            .addNameValuePair(
                    new JSONNameValueReader(new JSONStringReader("type"), new JSONStringReader("delete")));
    try {
        documentChunkManager.recordDocument(uid, serverHost, serverPath, documentURI, REMOVE_ACTIVITY, null,
                new ReaderInputStream(objectReader, Consts.UTF_8));
    } catch (IOException e) {
        handleIOException(e);
    }
    conditionallyFlushDocuments(activities);
}
From source file:org.apache.manifoldcf.agents.output.amazoncloudsearch.AmazonCloudSearchConnector.java
protected void flushDocuments(IOutputHistoryActivity activities)
        throws ManifoldCFException, ServiceInterruption {
    Logging.ingest.info("AmazonCloudSearch: Starting flush to Amazon");
    // Repeat until we are empty of cached stuff
    int chunkNumber = 0;
    while (true) {
        DocumentRecord[] records = documentChunkManager.readChunk(serverHost, serverPath, CHUNK_SIZE);
        try {
            if (records.length == 0) {
                break;
            }
            // The records consist of up to 1000 individual input streams, which must be all concatenated together into the post
            // To do that, we go into and out of Reader space once again...
            JSONArrayReader arrayReader = new JSONArrayReader();
            for (DocumentRecord dr : records) {
                arrayReader.addArrayElement(
                        new JSONValueReader(new InputStreamReader(dr.getDataStream(), Consts.UTF_8)));
            }
            // post data..
            String responsbody = postData(new ReaderInputStream(arrayReader, Consts.UTF_8));
            // check status
            String status = getStatusFromJsonResponse(responsbody);
            if ("success".equals(status)) {
                // Activity-log the individual documents we sent
                for (DocumentRecord dr : records) {
                    activities.recordActivity(null, dr.getActivity(), dr.getDataSize(), dr.getUri(), "OK",
                            null);
                }
                Logging.ingest.info("AmazonCloudSearch: Successfully sent document chunk " + chunkNumber);
                // remove documents from table..
                documentChunkManager.deleteChunk(records);
            } else {
                // Activity-log the individual documents that failed
                for (DocumentRecord dr : records) {
                    activities.recordActivity(null, dr.getActivity(), dr.getDataSize(), dr.getUri(), "FAILED",
                            responsbody);
                }
                Logging.ingest.error("AmazonCloudSearch: Error sending document chunk " + chunkNumber + ": '"
                        + responsbody + "'");
                throw new ManifoldCFException(
                        "Received error status from service after feeding document. Response body: '"
                                + responsbody + "'");
            }
        } catch (ManifoldCFException e) {
            if (e.getErrorCode() == ManifoldCFException.INTERRUPTED) {
                throw e;
            }
            for (DocumentRecord dr : records) {
                activities.recordActivity(null, dr.getActivity(), dr.getDataSize(), dr.getUri(),
                        e.getClass().getSimpleName().toUpperCase(Locale.ROOT), e.getMessage());
            }
            throw e;
        } catch (ServiceInterruption e) {
            for (DocumentRecord dr : records) {
                activities.recordActivity(null, dr.getActivity(), dr.getDataSize(), dr.getUri(),
                        e.getClass().getSimpleName().toUpperCase(Locale.ROOT), e.getMessage());
            }
            throw e;
        } finally {
            Throwable exception = null;
            for (DocumentRecord dr : records) {
                try {
                    dr.close();
                } catch (Throwable e) {
                    exception = e;
                }
            }
            if (exception != null) {
                if (exception instanceof ManifoldCFException) {
                    throw (ManifoldCFException) exception;
                } else if (exception instanceof Error) {
                    throw (Error) exception;
                } else if (exception instanceof RuntimeException) {
                    throw (RuntimeException) exception;
                } else {
                    throw new RuntimeException("Unknown exception class thrown: "
                            + exception.getClass().getName() + ": " + exception.getMessage(), exception);
                }
            }
        }
    }
}
From source file:org.apache.maven.plugin.assembly.format.ReaderFormatter.java
@Nullable
public static InputStreamTransformer getFileSetTransformers(final AssemblerConfigurationSource configSource,
        final boolean isFiltered, String fileSetLineEnding) throws AssemblyFormattingException {
    final LineEndings lineEndingToUse = LineEndingsUtils.getLineEnding(fileSetLineEnding);
    final boolean transformLineEndings = !LineEndings.keep.equals(lineEndingToUse);
    if (transformLineEndings || isFiltered) {
        return new InputStreamTransformer() {
            public InputStream transform(PlexusIoResource plexusIoResource, InputStream inputStream)
                    throws IOException {
                InputStream result = inputStream;
                if (isFiltered) {
                    boolean isPropertyFile = AssemblyFileUtils.isPropertyFile(plexusIoResource.getName());
                    final String encoding = isPropertyFile ? "ISO-8859-1" : configSource.getEncoding();
                    Reader source = encoding != null ? new InputStreamReader(inputStream, encoding)
                            : new InputStreamReader(inputStream); // wtf platform encoding ? TODO: Fix this
                    Reader filtered = createReaderFilter(source, configSource.getEscapeString(),
                            configSource.getDelimiters(), configSource, isPropertyFile);
                    result = encoding != null ? new ReaderInputStream(filtered, encoding)
                            : new ReaderInputStream(filtered);
                }
                if (transformLineEndings) {
                    checkifFileTypeIsAppropriateForLineEndingTransformation(plexusIoResource);
                    result = LineEndingsUtils.lineEndingConverter(result, lineEndingToUse);
                }
                return result;
            }
        };
    }
    return null;
}
From source file:org.apache.maven.plugins.assembly.format.ReaderFormatter.java
@Nullable
public static InputStreamTransformer getFileSetTransformers(final AssemblerConfigurationSource configSource,
        final boolean isFiltered, String fileSetLineEnding) throws AssemblyFormattingException {
    final LineEndings lineEndingToUse = LineEndingsUtils.getLineEnding(fileSetLineEnding);
    final boolean transformLineEndings = !LineEndings.keep.equals(lineEndingToUse);
    if (transformLineEndings || isFiltered) {
        return new InputStreamTransformer() {
            @Override
            @Nonnull
            public InputStream transform(@Nonnull PlexusIoResource plexusIoResource,
                    @Nonnull InputStream inputStream) throws IOException {
                InputStream result = inputStream;
                if (isFiltered) {
                    boolean isPropertyFile = AssemblyFileUtils.isPropertyFile(plexusIoResource.getName());
                    final String encoding = isPropertyFile ? "ISO-8859-1" : configSource.getEncoding();
                    Reader source = encoding != null ? new InputStreamReader(inputStream, encoding)
                            : new InputStreamReader(inputStream); // wtf platform encoding ? TODO: Fix this
                    Reader filtered = createReaderFilter(source, configSource.getEscapeString(),
                            configSource.getDelimiters(), configSource, isPropertyFile);
                    result = encoding != null ? new ReaderInputStream(filtered, encoding)
                            : new ReaderInputStream(filtered);
                }
                if (transformLineEndings) {
                    checkifFileTypeIsAppropriateForLineEndingTransformation(plexusIoResource);
                    result = LineEndingsUtils.lineEndingConverter(result, lineEndingToUse);
                }
                return result;
            }
        };
    }
    return null;
}
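Several of the examples above (Axis2, the Hadoop gateway rewrite filter, Maven Assembly) share one pattern: decode the byte stream into characters, apply a filtering Reader, then use ReaderInputStream to re-encode the filtered characters back into an InputStream. A self-contained sketch of that round trip, with a trivial upper-casing FilterReader standing in for the real rewriting/interpolating readers (all class and method names here are ours, not from those projects):

import java.io.ByteArrayInputStream;
import java.io.FilterReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.Charset;
import org.apache.commons.io.IOUtils;
import org.apache.commons.io.input.ReaderInputStream;

public class FilterAdapterDemo {

    // Trivial stand-in for a real filtering Reader (URL rewriter, property interpolator, ...).
    static final class UpperCaseReader extends FilterReader {
        UpperCaseReader(Reader in) {
            super(in);
        }

        @Override
        public int read(char[] buf, int off, int len) throws IOException {
            int n = super.read(buf, off, len);
            for (int i = off; i < off + n; i++) {
                buf[i] = Character.toUpperCase(buf[i]);
            }
            return n;
        }
    }

    // Bytes -> chars (decode) -> filtered chars -> bytes (re-encode), fully streaming.
    static InputStream transform(InputStream in, String encoding) {
        Reader source = new InputStreamReader(in, Charset.forName(encoding));
        Reader filtered = new UpperCaseReader(source);
        return new ReaderInputStream(filtered, encoding);
    }

    public static void main(String[] args) throws IOException {
        InputStream raw = new ByteArrayInputStream("hello".getBytes("UTF-8"));
        System.out.println(IOUtils.toString(transform(raw, "UTF-8"), "UTF-8")); // prints HELLO
    }
}

Because both the decode and the re-encode are lazy, the whole pipeline stays streaming: nothing is buffered beyond the small internal char/byte buffers, which is why these projects can safely run it over arbitrarily large payloads.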