List of usage examples for org.apache.hadoop.fs FileSystem initialize
public void initialize(URI name, Configuration conf) throws IOException
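A minimal sketch of calling initialize directly, for orientation before the repository examples below. The RawLocalFileSystem class and the file:/// URI are illustrative choices for this sketch, not something the signature mandates; any FileSystem subclass is bound to its URI and Configuration the same way.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class InitializeSketch {
    public static void main(String[] args) throws Exception {
        // construct an implementation directly, then bind it to a URI and configuration
        FileSystem fs = new RawLocalFileSystem();
        fs.initialize(URI.create("file:///"), new Configuration());
        try {
            System.out.println(fs.getUri()); // prints file:///
        } finally {
            fs.close();
        }
    }
}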
From source file:com.datatorrent.stram.debug.TupleRecorderTest.java
License:Apache License
@Test
public void testRecorder() throws IOException {
    FileSystem fs = new LocalFileSystem();
    try {
        TupleRecorder recorder = new TupleRecorder(null, "application_test_id_1");
        recorder.getStorage().setBytesPerPartFile(4096);
        recorder.getStorage().setLocalMode(true);
        recorder.getStorage().setBasePath("file://" + testWorkDir.getAbsolutePath() + "/recordings");
        recorder.addInputPortInfo("ip1", "str1");
        recorder.addInputPortInfo("ip2", "str2");
        recorder.addInputPortInfo("ip3", "str3");
        recorder.addOutputPortInfo("op1", "str4");
        recorder.setup(null, null);
        recorder.beginWindow(1000);
        recorder.beginWindow(1000);
        recorder.beginWindow(1000);
        Tuple t1 = new Tuple();
        t1.key = "speed";
        t1.value = "5m/h";
        recorder.writeTuple(t1, "ip1");
        recorder.endWindow();
        Tuple t2 = new Tuple();
        t2.key = "speed";
        t2.value = "4m/h";
        recorder.writeTuple(t2, "ip3");
        recorder.endWindow();
        Tuple t3 = new Tuple();
        t3.key = "speed";
        t3.value = "6m/h";
        recorder.writeTuple(t3, "ip2");
        recorder.endWindow();
        recorder.beginWindow(1000);
        Tuple t4 = new Tuple();
        t4.key = "speed";
        t4.value = "2m/h";
        recorder.writeTuple(t4, "op1");
        recorder.endWindow();
        recorder.teardown();

        fs.initialize((new Path(recorder.getStorage().getBasePath()).toUri()), new Configuration());
        Path path;
        FSDataInputStream is;
        String line;
        BufferedReader br;

        path = new Path(recorder.getStorage().getBasePath(), FSPartFileCollection.INDEX_FILE);
        is = fs.open(path);
        br = new BufferedReader(new InputStreamReader(is));
        line = br.readLine();
        //Assert.assertEquals("check index", "B:1000:T:0:part0.txt", line);
        Assert.assertTrue("check index", line.matches(
            "F:part0.txt:\\d+-\\d+:4:T:1000-1000:33:\\{\"3\":\"1\",\"1\":\"1\",\"0\":\"1\",\"2\":\"1\"\\}"));

        path = new Path(recorder.getStorage().getBasePath(), FSPartFileCollection.META_FILE);
        is = fs.open(path);
        br = new BufferedReader(new InputStreamReader(is));
        ObjectMapper mapper = new ObjectMapper();
        line = br.readLine();
        Assert.assertEquals("check version", "1.2", line);
        br.readLine(); // RecordInfo
        //RecordInfo ri = mapper.readValue(line, RecordInfo.class);
        line = br.readLine();
        PortInfo pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port1", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port1", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        line = br.readLine();
        pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port2", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port2", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        line = br.readLine();
        pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port3", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port3", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        line = br.readLine();
        pi = mapper.readValue(line, PortInfo.class);
        Assert.assertEquals("port4", recorder.getPortInfoMap().get(pi.name).id, pi.id);
        Assert.assertEquals("port4", recorder.getPortInfoMap().get(pi.name).type, pi.type);
        Assert.assertEquals("port size", 4, recorder.getPortInfoMap().size());
        //line = br.readLine();

        path = new Path(recorder.getStorage().getBasePath(), "part0.txt");
        is = fs.open(path);
        br = new BufferedReader(new InputStreamReader(is));
        line = br.readLine();
        Assert.assertTrue("check part0", line.startsWith("B:"));
        Assert.assertTrue("check part0", line.endsWith(":1000"));
        line = br.readLine();
        Assert.assertTrue("check part0 1", line.startsWith("T:"));
        Assert.assertTrue("check part0 1", line.endsWith(":0:30:{\"key\":\"speed\",\"value\":\"5m/h\"}"));
        line = br.readLine();
        Assert.assertTrue("check part0 2", line.startsWith("T:"));
        Assert.assertTrue("check part0 2", line.endsWith(":2:30:{\"key\":\"speed\",\"value\":\"4m/h\"}"));
        line = br.readLine();
        Assert.assertTrue("check part0 3", line.startsWith("T:"));
        Assert.assertTrue("check part0 3", line.endsWith(":1:30:{\"key\":\"speed\",\"value\":\"6m/h\"}"));
        line = br.readLine();
        Assert.assertTrue("check part0 4", line.startsWith("T:"));
        Assert.assertTrue("check part0 4", line.endsWith(":3:30:{\"key\":\"speed\",\"value\":\"2m/h\"}"));
        line = br.readLine();
        Assert.assertTrue("check part0 5", line.startsWith("E:"));
        Assert.assertTrue("check part0 5", line.endsWith(":1000"));
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    } finally {
        fs.close();
    }
}
From source file:com.google.cloud.hadoop.fs.gcs.GoogleHadoopFileSystemTestHelper.java
License:Open Source License
/**
 * Helper for plumbing through an initUri and creating the proper Configuration object.
 * Calls FileSystem.initialize on {@code ghfs}.
 */
private static void initializeInMemoryFileSystem(FileSystem ghfs, String initUriString) throws IOException {
    URI initUri;
    try {
        initUri = new URI(initUriString);
    } catch (URISyntaxException e) {
        throw new IllegalArgumentException(e);
    }
    String systemBucketName = "fake-test-system-bucket";
    Configuration config = new Configuration();
    config.set(GoogleHadoopFileSystemBase.GCS_SYSTEM_BUCKET_KEY, systemBucketName);
    config.setBoolean(GoogleHadoopFileSystemBase.GCS_CREATE_SYSTEM_BUCKET_KEY, true);
    ghfs.initialize(initUri, config);
}
From source file:com.ibm.stocator.fs.swift2d.systemtests.StreamingSwiftTest.java
License:Open Source License
public void accessPublicSwiftContainerWithSpaceTest() throws Exception {
    FileSystem fs = new ObjectStoreFileSystem();
    Configuration conf = new Configuration();
    String uriString = conf.get("fs.swift2d.test.uri");
    Assume.assumeNotNull(uriString);
    // adding suffix with space to the container name
    String scheme = "swift2d";
    String host = getHost(URI.create(uriString));
    // String origContainerName = getContainerName(host);
    // String newContainerName = origContainerName + " t";
    // uriString = uriString.replace(origContainerName, newContainerName);
    // use URI ctor that encodes authority according to the rules specified
    // in RFC 2396, section 5.2, step 7
    URI publicContainerURI = new URI(scheme, getHost(URI.create(uriString)), "/", null, null);
    fs.initialize(publicContainerURI, conf);
    FileStatus objectFS = null;
    try {
        objectFS = fs.getFileStatus(new Path(publicContainerURI));
    } catch (Exception e) {
        e.printStackTrace();
        Assert.assertNotNull("Unable to access public object ", objectFS);
    }
}
From source file:com.ibm.stocator.fs.swift2d.systemtests.StreamingSwiftTest.java
License:Open Source License
@Test
public void accessObjectWithSpaceTest() throws Exception {
    FileSystem fs = new ObjectStoreFileSystem();
    Configuration conf = new Configuration();
    String uriString = conf.get("fs.swift2d.test.uri");
    Assume.assumeNotNull(uriString);
    // adding suffix with space to the container name
    String scheme = "swift2d";
    String objectName = "/a/testObject.txt";
    URI publicContainerURI = new URI(uriString + objectName);
    // initialize file system
    fs.initialize(publicContainerURI, conf);
    FileStatus objectFS = null;
    Path f = null;
    try {
        FSDataOutputStream fsDataOutputStream = null;
        String currObjName = null;
        for (int i = 0; i < 5; i++) {
            currObjName = objectName + String.valueOf(i);
            // create timer
            createObjectTimer(90000.0, currObjName);
            publicContainerURI = new URI(scheme + "://" + getHost(URI.create(uriString)) + "/" + currObjName);
            f = new Path(publicContainerURI.toString());
            fsDataOutputStream = fs.create(f);
            String line = null;
            while (!objectExpired) {
                // generates input
                byte[] bytes = new byte[0];
                line = "\"2017-7-15 3:6:43\"," + String.valueOf(Math.random()) + ",6,18" + "\n";
                ByteBuffer linesBB = ByteBuffer.wrap(line.getBytes());
                bytes = new byte[linesBB.limit()];
                linesBB.get(bytes);
                // writes to output
                fsDataOutputStream.write(bytes);
                // simulate delays in input
                Thread.sleep(50);
            }
            fsDataOutputStream.close();
            objectExpired = false;
        }
    } catch (Exception e) {
        e.printStackTrace();
        Assert.assertNotNull("Unable to access public object.", objectFS);
    } finally {
        fs.delete(f, true);
    }
}
From source file:com.taobao.datax.plugins.common.DFSUtils.java
License:Open Source License
/**
 * Initialize a handle to a {@link FileSystem}.
 *
 * @param uri
 *            URI
 * @param conf
 *            {@link Configuration}
 *
 * @return a FileSystem instance
 */
public static FileSystem createFileSystem(URI uri, Configuration conf) throws IOException {
    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
    if (clazz == null) {
        throw new IOException("No FileSystem for scheme: " + uri.getScheme());
    }
    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, conf);
    fs.initialize(uri, conf);
    return fs;
}
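A hypothetical call site for the helper above; the hdfs://namenode:8020/ URI and the /tmp path are placeholders, and the call assumes fs.hdfs.impl resolves to a FileSystem class in the given Configuration.

// hypothetical usage of DFSUtils.createFileSystem; URI and path are placeholders
Configuration conf = new Configuration();
FileSystem fs = DFSUtils.createFileSystem(URI.create("hdfs://namenode:8020/"), conf);
try {
    // the returned instance is already initialized and ready for use
    System.out.println(fs.exists(new Path("/tmp")));
} finally {
    fs.close();
}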
From source file:gobblin.yarn.GobblinYarnAppLauncher.java
License:Apache License
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
    FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
    rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());

    LogCopier.Builder builder = LogCopier.newBuilder()
        .useSrcFileSystem(this.fs)
        .useDestFileSystem(rawLocalFs)
        .readFrom(getHdfsLogDir(appWorkDir))
        .writeTo(sinkLogDir)
        .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
    if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE)) {
        builder.useMaxBytesPerLogFile(config.getBytes(GobblinYarnConfigurationKeys.LOG_COPIER_MAX_FILE_SIZE));
    }
    if (config.hasPath(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER)) {
        builder.useScheduler(config.getString(GobblinYarnConfigurationKeys.LOG_COPIER_SCHEDULER));
    }
    return builder.build();
}
From source file:org.apache.ambari.fast_hdfs_resource.Runner.java
License:Apache License
public static void main(String[] args) throws IOException, URISyntaxException {
    // 1 - Check arguments
    if (args.length != 1) {
        System.err.println("Incorrect number of arguments. Please provide:\n"
            + "1) Path to json file\n"
            + "Exiting...");
        System.exit(1);
    }

    // 2 - Check if json-file exists
    final String jsonFilePath = args[0];
    File file = new File(jsonFilePath);
    if (!file.isFile()) {
        System.err.println("File " + jsonFilePath + " doesn't exist.\nExiting...");
        System.exit(1);
    }

    Gson gson = new Gson();
    Resource[] resources = null;
    FileSystem dfs = null;
    try {
        Configuration conf = new Configuration();
        dfs = FileSystem.get(conf);

        // 3 - Load data from JSON
        resources = (Resource[]) gson.fromJson(new FileReader(jsonFilePath), Resource[].class);

        // 4 - Connect to HDFS
        System.out.println("Using filesystem uri: " + FileSystem.getDefaultUri(conf).toString());
        dfs.initialize(FileSystem.getDefaultUri(conf), conf);

        for (Resource resource : resources) {
            System.out.println("Creating: " + resource);
            Resource.checkResourceParameters(resource, dfs);
            Path pathHadoop = new Path(resource.getTarget());
            if (!resource.isManageIfExists() && dfs.exists(pathHadoop)) {
                System.out.println("Skipping the operation for not managed DFS directory "
                    + resource.getTarget() + " since immutable_paths contains it.");
                continue;
            }
            if (resource.getAction().equals("create")) {
                // 5 - Create
                Resource.createResource(resource, dfs, pathHadoop);
                Resource.setMode(resource, dfs, pathHadoop);
                Resource.setOwner(resource, dfs, pathHadoop);
            } else if (resource.getAction().equals("delete")) {
                // 6 - Delete
                dfs.delete(pathHadoop, true);
            }
        }
    } catch (Exception e) {
        System.out.println("Exception occurred, Reason: " + e.getMessage());
        e.printStackTrace();
    } finally {
        dfs.close();
    }
    System.out.println("All resources created.");
}
From source file:org.apache.gobblin.yarn.GobblinYarnAppLauncher.java
License:Apache License
private LogCopier buildLogCopier(Config config, Path sinkLogDir, Path appWorkDir) throws IOException {
    FileSystem rawLocalFs = this.closer.register(new RawLocalFileSystem());
    rawLocalFs.initialize(URI.create(ConfigurationKeys.LOCAL_FS_URI), new Configuration());

    LogCopier.Builder builder = LogCopier.newBuilder()
        .useSrcFileSystem(this.fs)
        .useDestFileSystem(rawLocalFs)
        .readFrom(getHdfsLogDir(appWorkDir))
        .writeTo(sinkLogDir)
        .acceptsLogFileExtensions(ImmutableSet.of(ApplicationConstants.STDOUT, ApplicationConstants.STDERR));
    return builder.build();
}
From source file:org.hypertable.DfsBroker.hadoop.HadoopBroker.java
License:Open Source License
/**
 * Returns a brand new instance of the FileSystem
 *
 * @return A new instance of the filesystem
 */
private FileSystem newInstanceFileSystem() throws IOException {
    URI uri = FileSystem.getDefaultUri(mConf);
    Class<?> clazz = FileSystem.getFileSystemClass(uri.getScheme(), mConf);
    if (clazz == null)
        throw new IOException("HdfsBroker: No FileSystem for scheme: " + uri.getScheme());
    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, mConf);
    fs.initialize(uri, mConf);
    return fs;
}
From source file:org.hypertable.FsBroker.hadoop.HadoopBroker.java
License:Open Source License
/**
 * Returns a brand new instance of the FileSystem. It does not use
 * the FileSystem.Cache. In newer versions of HDFS, we can directly
 * invoke FileSystem.newInstance(Configuration).
 *
 * @param conf Configuration
 * @return A new instance of the filesystem
 */
private static FileSystem newInstanceFileSystem(Configuration conf) throws IOException {
    URI uri = FileSystem.getDefaultUri(conf);
    Class<?> clazz = conf.getClass("fs." + uri.getScheme() + ".impl", null);
    if (clazz == null) {
        throw new IOException("No FileSystem for scheme: " + uri.getScheme());
    }
    FileSystem fs = (FileSystem) ReflectionUtils.newInstance(clazz, conf);
    fs.initialize(uri, conf);
    return fs;
}
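As the Javadoc above notes, newer Hadoop versions expose this cache-bypassing construction directly; a minimal sketch of the equivalent call, assuming a Hadoop version that provides FileSystem.newInstance:

// equivalent on newer Hadoop: skips the FileSystem cache and returns a
// freshly initialized instance, replacing the reflection + initialize() dance above
Configuration conf = new Configuration();
FileSystem fs = FileSystem.newInstance(FileSystem.getDefaultUri(conf), conf);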