List of usage examples for org.apache.hadoop.hdfs DistributedFileSystem close
@Override public void close() throws IOException
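All of the examples below end with an explicit dfs.close() call, usually just before or inside a finally block. Because FileSystem (and therefore DistributedFileSystem) implements java.io.Closeable, the same cleanup can also be written with try-with-resources. Here is a minimal sketch, assuming an HDFS NameNode reachable at a placeholder fs.defaultFS of hdfs://localhost:8020 (not taken from the examples below):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class CloseExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Placeholder address; point this at your own NameNode.
    conf.set("fs.defaultFS", "hdfs://localhost:8020");
    // try-with-resources invokes close() automatically, even if mkdirs throws.
    try (DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf)) {
      dfs.mkdirs(new Path("/example"));
    }
  }
}

Note that FileSystem.get(conf) may return a shared instance from the FileSystem cache, so closing it also closes it for any other code holding the same reference. The tests below sidestep this by using a dedicated MiniDFSCluster per test.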
From source file: alluxio.underfs.hdfs.LocalMiniDFSCluster.java
License: Apache License

/**
 * Tests the local minidfscluster only.
 */
public static void main(String[] args) throws Exception {
  LocalMiniDFSCluster cluster = null;
  try {
    cluster = new LocalMiniDFSCluster("/tmp/dfs", 1, 54321);
    cluster.start();
    System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());
    Thread.sleep(10);
    DistributedFileSystem dfs = cluster.getDFSClient();
    dfs.mkdirs(new Path("/1"));
    mkdirs(cluster.getUnderFilesystemAddress() + "/1/2");
    FileStatus[] fs = dfs.listStatus(new Path(AlluxioURI.SEPARATOR));
    assert fs.length != 0;
    System.out.println(fs[0].getPath().toUri());
    dfs.close();

    cluster.shutdown();

    cluster = new LocalMiniDFSCluster("/tmp/dfs", 3);
    cluster.start();
    System.out.println("Address of local minidfscluster: " + cluster.getUnderFilesystemAddress());
    dfs = cluster.getDFSClient();
    dfs.mkdirs(new Path("/1"));
    UnderFileSystemUtils
        .touch(cluster.getUnderFilesystemAddress() + "/1" + "/_format_" + System.currentTimeMillis());
    fs = dfs.listStatus(new Path("/1"));
    assert fs.length != 0;
    System.out.println(fs[0].getPath().toUri());
    dfs.close();
    cluster.shutdown();
  } finally {
    if (cluster != null && cluster.isStarted()) {
      cluster.shutdown();
    }
  }
}
From source file: com.wandisco.s3hdfs.rewrite.filter.TestConcurrency.java
License: Apache License

public static void main(String[] args) throws Exception {
  Configuration conf = new HdfsConfiguration(new S3HdfsConfiguration());
  DistributedFileSystem hdfs = (DistributedFileSystem) DistributedFileSystem.get(conf);
  PROXY_PORT = Integer.decode(conf.get(S3_PROXY_PORT_KEY, S3_PROXY_PORT_DEFAULT));

  TestConcurrency test = new TestConcurrency();
  test.hdfs = hdfs;
  test.s3Directory = conf.get(S3_DIRECTORY_KEY);
  test.hostName = conf.get(S3_SERVICE_HOSTNAME_KEY);
  test.testUtil = new S3HdfsTestUtil(test.hdfs, test.s3Directory);
  test.s3Service = test.testUtil.configureS3Service(test.hostName, PROXY_PORT);

  test.testFiveRandom();
  hdfs.close();
}
From source file: org.apache.tajo.storage.TestByteBufLineReader.java
License: Apache License

@Test
public void testReaderWithDFS() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitClusterUp();

  TajoConf tajoConf = new TajoConf(conf);
  tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

  Path tablePath = new Path("/testReaderWithDFS");
  Path filePath = new Path(tablePath, "data.dat");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();
    FSDataOutputStream out = fs.create(filePath, true);
    out.write(LINE.getBytes(Charset.defaultCharset()));
    out.write('\n');
    out.close();

    assertTrue(fs.exists(filePath));
    FSDataInputStream inputStream = fs.open(filePath);
    assertTrue(inputStream.getWrappedStream() instanceof ByteBufferReadable);

    ByteBufLineReader lineReader = new ByteBufLineReader(new FSDataInputChannel(inputStream));
    assertEquals(LINE, lineReader.readLine());
    lineReader.seek(0);
    assertEquals(LINE, lineReader.readLine());
    assertNull(lineReader.readLine());
    lineReader.close();
    fs.close();
  } finally {
    cluster.shutdown(true);
  }
}
From source file: org.apache.tajo.storage.TestFileStorageManager.java
License: Apache License

@Test
public void testGetSplit() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitClusterUp();

  TajoConf tajoConf = new TajoConf(conf);
  tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

  int testCount = 10;
  Path tablePath = new Path("/testGetSplit");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();

    // Create test partitions
    List<Path> partitions = Lists.newArrayList();
    for (int i = 0; i < testCount; i++) {
      Path tmpFile = new Path(tablePath, String.valueOf(i));
      DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
      partitions.add(tmpFile);
    }

    assertTrue(fs.exists(tablePath));
    FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
    assertEquals(fs.getUri(), sm.getFileSystem().getUri());

    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);
    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

    List<Fragment> splits = Lists.newArrayList();
    // Get FileFragments in partition batch
    splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
    assertEquals(testCount, splits.size());
    // -1 is unknown volumeId
    assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

    splits.clear();
    splits.addAll(sm.getSplits("data", meta, schema,
        partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
    assertEquals(testCount / 2, splits.size());
    assertEquals(1, splits.get(0).getHosts().length);
    assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
    fs.close();
  } finally {
    cluster.shutdown(true);
  }
}
From source file: org.apache.tajo.storage.TestFileStorageManager.java
License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitClusterUp();

  TajoConf tajoConf = new TajoConf(conf);
  tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

  int testCount = 10;
  Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();

    // Create test files
    for (int i = 0; i < testCount; i++) {
      Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
      DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
    }
    assertTrue(fs.exists(tablePath));
    FileStorageManager sm = (FileStorageManager) StorageManager.getFileStorageManager(tajoConf);
    assertEquals(fs.getUri(), sm.getFileSystem().getUri());

    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);
    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

    List<Fragment> splits = Lists.newArrayList();
    splits.addAll(sm.getSplits("data", meta, schema, tablePath));

    assertEquals(testCount, splits.size());
    assertEquals(2, splits.get(0).getHosts().length);
    assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
    assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
    fs.close();
  } finally {
    cluster.shutdown(true);
  }
}
From source file: org.apache.tajo.storage.TestFileTablespace.java
License: Apache License

@Test
public void testGetSplit() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitClusterUp();

  TajoConf tajoConf = new TajoConf(conf);
  tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

  int testCount = 10;
  Path tablePath = new Path("/testGetSplit");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();

    // Create test partitions
    List<Path> partitions = Lists.newArrayList();
    for (int i = 0; i < testCount; i++) {
      Path tmpFile = new Path(tablePath, String.valueOf(i));
      DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
      partitions.add(tmpFile);
    }

    assertTrue(fs.exists(tablePath));
    FileTablespace space = new FileTablespace("testGetSplit", fs.getUri());
    space.init(new TajoConf(conf));
    assertEquals(fs.getUri(), space.getUri());

    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);
    TableMeta meta = CatalogUtil.newTableMeta("TEXT");

    List<Fragment> splits = Lists.newArrayList();
    // Get FileFragments in partition batch
    splits.addAll(space.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
    assertEquals(testCount, splits.size());
    // -1 is unknown volumeId
    assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);

    splits.clear();
    splits.addAll(space.getSplits("data", meta, schema,
        partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
    assertEquals(testCount / 2, splits.size());
    assertEquals(1, splits.get(0).getHosts().length);
    assertEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
    fs.close();
  } finally {
    cluster.shutdown(true);
  }
}
From source file: org.apache.tajo.storage.TestFileTablespace.java
License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
  cluster.waitClusterUp();

  TajoConf tajoConf = new TajoConf(conf);
  tajoConf.setVar(TajoConf.ConfVars.ROOT_DIR, cluster.getFileSystem().getUri() + "/tajo");

  int testCount = 10;
  Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();

    // Create test files
    for (int i = 0; i < testCount; i++) {
      Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
      DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
    }
    assertTrue(fs.exists(tablePath));
    FileTablespace sm = new FileTablespace("testGetSplitWithBlockStorageLocationsBatching", fs.getUri());
    sm.init(new TajoConf(conf));
    assertEquals(fs.getUri(), sm.getUri());

    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);
    TableMeta meta = CatalogUtil.newTableMeta("TEXT");

    List<Fragment> splits = Lists.newArrayList();
    splits.addAll(sm.getSplits("data", meta, schema, tablePath));

    assertEquals(testCount, splits.size());
    assertEquals(2, splits.get(0).getHosts().length);
    assertEquals(2, ((FileFragment) splits.get(0)).getDiskIds().length);
    assertNotEquals(-1, ((FileFragment) splits.get(0)).getDiskIds()[0]);
    fs.close();
  } finally {
    cluster.shutdown(true);
  }
}
From source file: org.apache.tajo.storage.TestStorageManager.java
License: Apache License

@Test
public void testGetSplit() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, false);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();

  int testCount = 10;
  Path tablePath = new Path("/testGetSplit");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();

    // Create test partitions
    List<Path> partitions = Lists.newArrayList();
    for (int i = 0; i < testCount; i++) {
      Path tmpFile = new Path(tablePath, String.valueOf(i));
      DFSTestUtil.createFile(fs, new Path(tmpFile, "tmpfile.dat"), 10, (short) 2, 0xDEADDEADL);
      partitions.add(tmpFile);
    }

    assertTrue(fs.exists(tablePath));
    StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);
    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

    List<FileFragment> splits = Lists.newArrayList();
    // Get FileFragments in partition batch
    splits.addAll(sm.getSplits("data", meta, schema, partitions.toArray(new Path[partitions.size()])));
    assertEquals(testCount, splits.size());
    // -1 is unknown volumeId
    assertEquals(-1, splits.get(0).getDiskIds()[0]);

    splits.clear();
    splits.addAll(sm.getSplits("data", meta, schema,
        partitions.subList(0, partitions.size() / 2).toArray(new Path[partitions.size() / 2])));
    assertEquals(testCount / 2, splits.size());
    assertEquals(1, splits.get(0).getHosts().length);
    assertEquals(-1, splits.get(0).getDiskIds()[0]);
    fs.close();
  } finally {
    cluster.shutdown();

    File dir = new File(testDataPath);
    dir.delete();
  }
}
From source file: org.apache.tajo.storage.TestStorageManager.java
License: Apache License

@Test
public void testGetSplitWithBlockStorageLocationsBatching() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  String testDataPath = TEST_PATH + "/" + UUID.randomUUID().toString();
  conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, testDataPath);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0);
  conf.setBoolean(DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, true);

  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();

  int testCount = 10;
  Path tablePath = new Path("/testGetSplitWithBlockStorageLocationsBatching");
  try {
    DistributedFileSystem fs = cluster.getFileSystem();

    // Create test files
    for (int i = 0; i < testCount; i++) {
      Path tmpFile = new Path(tablePath, "tmpfile" + i + ".dat");
      DFSTestUtil.createFile(fs, tmpFile, 10, (short) 2, 0xDEADDEADL);
    }
    assertTrue(fs.exists(tablePath));
    StorageManager sm = StorageManager.getStorageManager(new TajoConf(conf), tablePath);

    Schema schema = new Schema();
    schema.addColumn("id", Type.INT4);
    schema.addColumn("age", Type.INT4);
    schema.addColumn("name", Type.TEXT);
    TableMeta meta = CatalogUtil.newTableMeta(StoreType.CSV);

    List<FileFragment> splits = Lists.newArrayList();
    splits.addAll(sm.getSplits("data", meta, schema, tablePath));

    assertEquals(testCount, splits.size());
    assertEquals(2, splits.get(0).getHosts().length);
    assertEquals(2, splits.get(0).getDiskIds().length);
    assertNotEquals(-1, splits.get(0).getDiskIds()[0]);
    fs.close();
  } finally {
    cluster.shutdown();

    File dir = new File(testDataPath);
    dir.delete();
  }
}
From source file: se.sics.gvod.stream.system.hops.SetupExperiment.java
License: Open Source License

public static void main(String[] args) throws IOException, HashUtil.HashBuilderException {
  String hopsURL = "bbc1.sics.se:26801";
  Configuration conf = new Configuration();
  conf.set("fs.defaultFS", hopsURL);
  DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);

  String path = "/experiment";
  if (!fs.isDirectory(new Path(path))) {
    fs.mkdirs(new Path(path));
  } else {
    fs.delete(new Path(path), true);
    fs.mkdirs(new Path(path));
  }
  String uploadDirPath = path + "/upload";
  fs.mkdirs(new Path(uploadDirPath));
  String downloadDirPath = path + "/download";
  fs.mkdirs(new Path(downloadDirPath));

  String dataFile = uploadDirPath + "/file";
  Random rand = new Random(1234);
  try (FSDataOutputStream out = fs.create(new Path(dataFile))) {
    for (int i = 0; i < fileSize / pieceSize; i++) {
      byte[] data = new byte[1024];
      rand.nextBytes(data);
      out.write(data);
      out.flush();
    }
    System.err.println("created file - expected:" + fileSize + " created:" + out.size());
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
  fs.close();
}