Usage examples for org.apache.commons.vfs2 FileObject.getContent()
FileContent getContent() throws FileSystemException;
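In its simplest form, getContent() returns the FileContent for a file, from which input and output streams can be opened. Below is a minimal sketch of the read side, assuming commons-vfs2 and commons-io are on the classpath; the file URI is a placeholder, not taken from the examples that follow.

import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import org.apache.commons.io.IOUtils;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.FileSystemManager;
import org.apache.commons.vfs2.VFS;

public class GetContentReadSketch {
  public static void main(String[] args) throws Exception {
    FileSystemManager fsManager = VFS.getManager();
    // Placeholder URI for this sketch; any supported VFS scheme works here
    FileObject file = fsManager.resolveFile("file:///tmp/example.txt");
    try (InputStream in = file.getContent().getInputStream()) {
      System.out.println(IOUtils.toString(in, StandardCharsets.UTF_8));
    } finally {
      // Closing the FileObject releases the FileContent and provider resources
      file.close();
    }
  }
}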
From source file: org.pentaho.hadoop.shim.common.DistributedCacheUtilImpl.java
private void copyConfigProperties(FileObject source, FileSystem fs, Path dest) {
  try (FSDataOutputStream output = fs.create(dest);
       InputStream input = source.getContent().getInputStream()) {
    List<String> lines = IOUtils.readLines(input);
    for (String line : lines) {
      if (!line.startsWith(AUTH_PREFIX)) {
        IOUtils.write(line, output);
        IOUtils.write(String.format("%n"), output);
      }
    }
  } catch (IOException e) {
    throw new RuntimeException("Error copying modified version of config.properties", e);
  }
}
From source file: org.pentaho.hadoop.shim.common.DistributedCacheUtilImpl.java
/**
 * Extract a zip archive to a directory.
 *
 * @param archive Zip archive to extract
 * @param dest    Destination directory. This must not exist!
 * @return Directory the zip was extracted into
 * @throws IllegalArgumentException when the archive file does not exist or the destination directory already exists
 * @throws IOException
 * @throws KettleFileException
 */
public FileObject extract(FileObject archive, FileObject dest) throws IOException, KettleFileException {
  if (!archive.exists()) {
    throw new IllegalArgumentException("archive does not exist: " + archive.getURL().getPath());
  }
  if (dest.exists()) {
    throw new IllegalArgumentException("destination already exists");
  }
  dest.createFolder();
  try {
    byte[] buffer = new byte[DEFAULT_BUFFER_SIZE];
    int len = 0;
    ZipInputStream zis = new ZipInputStream(archive.getContent().getInputStream());
    try {
      ZipEntry ze;
      while ((ze = zis.getNextEntry()) != null) {
        FileObject entry = KettleVFS.getFileObject(dest + Const.FILE_SEPARATOR + ze.getName());
        FileObject parent = entry.getParent();
        if (parent != null) {
          parent.createFolder();
        }
        if (ze.isDirectory()) {
          entry.createFolder();
          continue;
        }
        OutputStream os = KettleVFS.getOutputStream(entry, false);
        try {
          while ((len = zis.read(buffer)) > 0) {
            os.write(buffer, 0, len);
          }
        } finally {
          if (os != null) {
            os.close();
          }
        }
      }
    } finally {
      if (zis != null) {
        zis.close();
      }
    }
  } catch (Exception ex) {
    // Try to clean up the temp directory and all files
    if (!deleteDirectory(dest)) {
      throw new KettleFileException("Could not clean up temp dir after error extracting", ex);
    }
    throw new KettleFileException("error extracting archive", ex);
  }
  return dest;
}
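For comparison, the same unzip loop can be written more compactly with try-with-resources and FileObject.resolveFile. This is a minimal sketch, not the Pentaho implementation; a production version should also reject entry names containing ".." (zip slip).

import java.io.OutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.commons.vfs2.FileObject;

public final class UnzipSketch {
  public static void unzip(FileObject archive, FileObject dest) throws Exception {
    dest.createFolder();
    try (ZipInputStream zis = new ZipInputStream(archive.getContent().getInputStream())) {
      byte[] buffer = new byte[8192];
      ZipEntry ze;
      while ((ze = zis.getNextEntry()) != null) {
        // NOTE: real code should validate ze.getName() against path traversal
        FileObject entry = dest.resolveFile(ze.getName());
        if (ze.isDirectory()) {
          entry.createFolder();
          continue;
        }
        entry.getParent().createFolder();
        // getOutputStream() creates the file if it does not yet exist
        try (OutputStream os = entry.getContent().getOutputStream()) {
          int len;
          while ((len = zis.read(buffer)) > 0) {
            os.write(buffer, 0, len);
          }
        }
      }
    }
  }
}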
From source file: org.pentaho.hadoop.shim.common.format.avro.PentahoAvroInputFormat.java
private DataFileStream<GenericRecord> createDataFileStream(String schemaFileName, String fileName) throws Exception {
  DatumReader<GenericRecord> datumReader;
  if (schemaFileName != null && schemaFileName.length() > 0) {
    datumReader = new GenericDatumReader<GenericRecord>(readAvroSchema(schemaFileName));
  } else {
    datumReader = new GenericDatumReader<GenericRecord>();
  }
  FileObject fileObject = KettleVFS.getFileObject(fileName);
  if (fileObject.isFile()) {
    return new DataFileStream<GenericRecord>(fileObject.getContent().getInputStream(), datumReader);
  } else {
    FileObject[] avroFiles = fileObject.findFiles(new FileExtensionSelector("avro"));
    if (!Utils.isEmpty(avroFiles)) {
      return new DataFileStream<GenericRecord>(avroFiles[0].getContent().getInputStream(), datumReader);
    }
    return null;
  }
}
From source file: org.pentaho.hadoop.shim.HadoopConfigurationLocator.java
/**
 * Load the properties file located at {@code file}.
 *
 * @param file Location of a properties file to load
 * @return Loaded properties file
 * @throws IOException         Error loading properties from file
 * @throws FileSystemException Error locating input stream for file
 */
protected Properties loadProperties(FileObject file) throws FileSystemException, IOException {
  Properties p = new Properties();
  // Close the stream when done so the underlying VFS resources are released
  // (the original left the stream open; Properties.load does not close it)
  try (InputStream in = file.getContent().getInputStream()) {
    p.load(in);
  }
  return p;
}
From source file: org.pentaho.hadoop.shim.HadoopConfigurationLocatorTest.java
@BeforeClass
public static void setup() throws Exception {
  // Create a test hadoop configuration "a"
  FileObject ramRoot = VFS.getManager().resolveFile(HADOOP_CONFIGURATIONS_PATH);
  FileObject aConfigFolder = ramRoot.resolveFile("a");
  if (aConfigFolder.exists()) {
    aConfigFolder.delete(new AllFileSelector());
  }
  aConfigFolder.createFolder();
  assertEquals(FileType.FOLDER, aConfigFolder.getType());

  // Create the properties file for the configuration as hadoop-configurations/a/config.properties
  configFile = aConfigFolder.resolveFile("config.properties");
  Properties p = new Properties();
  p.setProperty("name", "Test Configuration A");
  p.setProperty("classpath", "");
  p.setProperty("ignore.classes", "");
  p.setProperty("library.path", "");
  p.setProperty("required.classes", HadoopConfigurationLocatorTest.class.getName());
  p.store(configFile.getContent().getOutputStream(), "Test Configuration A");
  configFile.close();

  // Create the implementation jar
  FileObject implJar = aConfigFolder.resolveFile("a-config.jar");
  implJar.createFile();

  // Use ShrinkWrap to create the jar and write it out to VFS
  JavaArchive archive = ShrinkWrap.create(JavaArchive.class, "a-configuration.jar")
      .addAsServiceProvider(HadoopShim.class, MockHadoopShim.class)
      .addClass(MockHadoopShim.class);
  archive.as(ZipExporter.class).exportTo(implJar.getContent().getOutputStream());
}
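The write side of getContent() is symmetric: getOutputStream() creates the file if needed and overwrites existing content by default. A minimal sketch using the in-memory ram:// provider that ships with commons-vfs2; the path is arbitrary, chosen for this illustration.

import java.io.OutputStream;
import java.util.Properties;
import org.apache.commons.vfs2.FileObject;
import org.apache.commons.vfs2.VFS;

public class GetContentWriteSketch {
  public static void main(String[] args) throws Exception {
    FileObject out = VFS.getManager().resolveFile("ram://example/config.properties");
    // Closing the stream flushes the content; closing the FileObject would also work
    try (OutputStream os = out.getContent().getOutputStream()) {
      Properties p = new Properties();
      p.setProperty("name", "example");
      p.store(os, "written through VFS");
    }
  }
}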
From source file: org.pentaho.hadoop.shim.hsp101.HadoopShim.java
@Override
public void onLoad(HadoopConfiguration config, HadoopConfigurationFileSystemManager fsm) throws Exception {
  fsm.addProvider(config, "hdfs", config.getIdentifier(), new HDFSFileProvider());
  setDistributedCacheUtil(new DistributedCacheUtilImpl(config) {
    /**
     * Default permission for cached files
     * <p/>
     * Not using FsPermission.createImmutable due to EOFExceptions when using it with Hadoop 0.20.2
     */
    private final FsPermission CACHED_FILE_PERMISSION = new FsPermission((short) 0755);

    public void addFileToClassPath(Path file, Configuration conf) throws IOException {
      String classpath = conf.get("mapred.job.classpath.files");
      conf.set("mapred.job.classpath.files",
          classpath == null ? file.toString() : classpath + getClusterPathSeparator() + file.toString());
      FileSystem fs = FileSystem.get(conf);
      URI uri = fs.makeQualified(file).toUri();
      DistributedCache.addCacheFile(uri, conf);
    }

    /**
     * Stages the source file or folder to a Hadoop file system and sets their permission and replication
     * value appropriately to be used with the Distributed Cache. WARNING: This will delete the contents of
     * dest before staging the archive.
     *
     * @param source    File or folder to copy to the file system. If it is a folder all contents will be
     *                  copied into dest.
     * @param fs        Hadoop file system to store the contents of the archive in
     * @param dest      Destination to copy source into. If source is a file, the new file name will be
     *                  exactly dest. If source is a folder its contents will be copied into dest. For more
     *                  info see {@link FileSystem#copyFromLocalFile(org.apache.hadoop.fs.Path,
     *                  org.apache.hadoop.fs.Path)}.
     * @param overwrite Should an existing file or folder be overwritten? If not, an exception will be
     *                  thrown.
     * @throws IOException         Destination exists and is not a directory
     * @throws KettleFileException Source does not exist or destination exists and overwrite is false.
     */
    public void stageForCache(FileObject source, FileSystem fs, Path dest, boolean overwrite)
        throws IOException, KettleFileException {
      if (!source.exists()) {
        throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
            "DistributedCacheUtil.SourceDoesNotExist", source));
      }
      if (fs.exists(dest)) {
        if (overwrite) {
          // It is a directory, clear it out
          fs.delete(dest, true);
        } else {
          throw new KettleFileException(BaseMessages.getString(DistributedCacheUtilImpl.class,
              "DistributedCacheUtil.DestinationExists", dest.toUri().getPath()));
        }
      }
      // Use the same replication we'd use for submitting jobs
      short replication = (short) fs.getConf().getInt("mapred.submit.replication", 10);
      copyFile(source, fs, dest, overwrite);
      fs.setReplication(dest, replication);
    }

    private void copyFile(FileObject source, FileSystem fs, Path dest, boolean overwrite) throws IOException {
      if (source.getType() == FileType.FOLDER) {
        fs.mkdirs(dest);
        fs.setPermission(dest, CACHED_FILE_PERMISSION);
        for (FileObject fileObject : source.getChildren()) {
          copyFile(fileObject, fs, new Path(dest, fileObject.getName().getBaseName()), overwrite);
        }
      } else {
        try (FSDataOutputStream fsDataOutputStream = fs.create(dest, overwrite)) {
          IOUtils.copy(source.getContent().getInputStream(), fsDataOutputStream);
          fs.setPermission(dest, CACHED_FILE_PERMISSION);
        }
      }
    }

    public String getClusterPathSeparator() {
      return System.getProperty("hadoop.cluster.path.separator", ",");
    }
  });
}
From source file: org.pentaho.metaverse.impl.VfsLineageCollector.java
@Override
public void compressArtifacts(List<String> paths, OutputStream os) {
  ZipOutputStream zos = null;
  try {
    FileSystemOptions opts = new FileSystemOptions();
    zos = new ZipOutputStream(os);
    for (String path : paths) {
      FileObject file = KettleVFS.getFileObject(path, opts);
      try {
        // register the file as an entry in the zip file
        ZipEntry zipEntry = new ZipEntry(file.getName().getPath());
        zos.putNextEntry(zipEntry);
        // write the file's bytes to the zip stream
        try (InputStream fis = file.getContent().getInputStream()) {
          zos.write(IOUtils.toByteArray(fis));
        }
      } catch (IOException e) {
        log.error(Messages.getString("ERROR.FailedAddingFileToZip", file.getName().getPath()));
      } finally {
        // indicate we are done with this file
        try {
          zos.closeEntry();
        } catch (IOException e) {
          log.error(Messages.getString("ERROR.FailedToProperlyCloseZipEntry", file.getName().getPath()));
        }
      }
    }
  } catch (KettleFileException e) {
    log.error(Messages.getString("ERROR.UnexpectedVfsError", e.getMessage()));
  } finally {
    IOUtils.closeQuietly(zos);
  }
}
From source file: org.pentaho.metaverse.impl.VfsLineageWriter.java
protected OutputStream createOutputStream(LineageHolder holder, String extension) {
  if (holder != null) {
    try {
      IExecutionProfile profile = holder.getExecutionProfile();
      String timestampString = Long.toString(profile.getExecutionData().getStartTime().getTime());
      FileObject destFolder = getOutputDirectoryAsFile(holder);
      String name = Const.NVL(profile.getName(), "unknown");
      FileObject file = destFolder.resolveFile(timestampString + "_" + name + extension);
      FileContent content = file.getContent();
      return content.getOutputStream();
    } catch (Exception e) {
      log.error(Messages.getErrorString("ERROR.CantCreateOutputStream"), e);
      return null;
    }
  } else {
    return null;
  }
}
From source file: org.pentaho.platform.pdi.vfs.MetadataToMondrianVfsTest.java
@Test
public void testVfs() throws Exception {
  ((DefaultFileSystemManager) VFS.getManager()).addProvider("mtm", new MetadataToMondrianVfs());
  FileSystemManager fsManager = VFS.getManager();
  FileObject fobj = fsManager.resolveFile("mtm:src/test/resources/example_olap.xmi");
  StringBuilder buf = new StringBuilder(1000);
  InputStream in = fobj.getContent().getInputStream();
  int n;
  while ((n = in.read()) != -1) {
    buf.append((char) n);
  }
  in.close();
  String results = buf.toString();
  Assert.assertTrue(results.indexOf("<Cube name=\"customer2 Table\">") >= 0);
}
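Reading one byte at a time works, but since commons-io is already in use elsewhere in these examples, the body of the test collapses to a single call. A hedged equivalent sketch, assuming the "mtm" provider has been registered exactly as above:

// Same "mtm" URI as the test above; the provider must be registered first
FileObject fobj = VFS.getManager().resolveFile("mtm:src/test/resources/example_olap.xmi");
try (InputStream in = fobj.getContent().getInputStream()) {
  String results = IOUtils.toString(in, StandardCharsets.UTF_8);
  Assert.assertTrue(results.contains("<Cube name=\"customer2 Table\">"));
}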
From source file: org.pentaho.platform.repository.solution.filebased.FileObjectTestHelper.java
public static FileObject mockFile(final String contents, final boolean exists) throws FileSystemException {
  FileObject fileObject = mock(FileObject.class);
  when(fileObject.exists()).thenReturn(exists);
  FileContent fileContent = mock(FileContent.class);
  when(fileObject.getContent()).thenReturn(fileContent);
  when(fileContent.getInputStream()).thenReturn(IOUtils.toInputStream(contents));
  final FileObject parent = mock(FileObject.class);
  when(fileObject.getParent()).thenReturn(parent);
  final FileName fileName = mock(FileName.class);
  when(parent.getName()).thenReturn(fileName);
  when(fileName.getURI()).thenReturn("mondrian:/catalog");
  return fileObject;
}
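A hypothetical test using this helper might look like the following; the test name and assertion style are assumed for illustration, not taken from the Pentaho sources.

@Test
public void readsMockedContent() throws Exception {
  FileObject file = FileObjectTestHelper.mockFile("hello world", true);
  assertTrue(file.exists());
  assertEquals("hello world", IOUtils.toString(file.getContent().getInputStream()));
  assertEquals("mondrian:/catalog", file.getParent().getName().getURI());
}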