Usage examples for org.apache.hadoop.fs.FileStatus#getOwner()
public String getOwner()
From source file: tachyon.hadoop.TFSAclIntegrationTest.java
License: Apache License
/**
 * Test for {@link TFS#setOwner(Path, String, String)} when both the owner and the
 * group arguments are {@code null}: the call must leave the file's owner and group
 * unchanged.
 */
@Test
public void checkNullOwnerAndGroupTest() throws Exception {
    Path file = new Path("/chownfileD");
    create(sTFS, file);

    // Record the owner/group the file is created with.
    FileStatus status = sTFS.getFileStatus(file);
    String ownerBefore = status.getOwner();
    String groupBefore = status.getGroup();

    // Passing null for both owner and group should be a no-op.
    sTFS.setOwner(file, null, null);

    status = sTFS.getFileStatus(file);
    Assert.assertEquals(ownerBefore, status.getOwner());
    Assert.assertEquals(groupBefore, status.getGroup());
}
From source file: wherehows.AvroFileAnalyzer.java
License: Open Source License
@Override public DatasetJsonRecord getSchema(Path targetFilePath) throws IOException { System.out.println("avro file path : " + targetFilePath.toUri().getPath()); SeekableInput sin = new FsInput(targetFilePath, fs.getConf()); DataFileReader<GenericRecord> reader = new DataFileReader<GenericRecord>(sin, new GenericDatumReader<GenericRecord>()); String codec = reader.getMetaString("avro.codec"); long record_count = reader.getBlockCount(); String schemaString = reader.getSchema().toString(); String storage = STORAGE_TYPE; String abstractPath = targetFilePath.toUri().getPath(); FileStatus fstat = fs.getFileStatus(targetFilePath); DatasetJsonRecord datasetJsonRecord = new DatasetJsonRecord(schemaString, abstractPath, fstat.getModificationTime(), fstat.getOwner(), fstat.getGroup(), fstat.getPermission().toString(), codec, storage, ""); reader.close();// ww w .j a v a 2 s. com sin.close(); return datasetJsonRecord; }
From source file: wherehows.OrcFileAnalyzer.java
License: Open Source License
/**
 * Builds a {@link DatasetJsonRecord} for the ORC file at {@code targetFilePath},
 * combining its type description and compression codec with the file's HDFS
 * metadata (mtime, owner, group, permissions).
 *
 * @param targetFilePath path of the ORC file to inspect
 * @return a record describing the file's schema and filesystem metadata
 * @throws IOException if the ORC file or its status cannot be read
 */
@Override
public DatasetJsonRecord getSchema(Path targetFilePath) throws IOException {
    Reader reader = OrcFile.createReader(fs, targetFilePath);
    FileStatus status = fs.getFileStatus(targetFilePath);
    String path = targetFilePath.toUri().getPath();
    return new DatasetJsonRecord(
        reader.getObjectInspector().getTypeName(),
        path,
        status.getModificationTime(),
        status.getOwner(),
        status.getGroup(),
        status.getPermission().toString(),
        String.valueOf(reader.getCompression()),
        STORAGE_TYPE,
        "");
}
From source file: wherehows.SchemaFetch.java
License: Open Source License
private static void scanPathHelper(Path path, FileSystem scanFs) throws IOException, InterruptedException, SQLException { String curPath = path.toUri().getPath(); Path n = path;/*from w w w .j a v a2 s . c o m*/ if (path.getName().matches("^(\\.|_|tmp|temp|test|trash|backup|archive|ARCHIVE|storkinternal).*")) return; logger.info(" -- scanPath(" + curPath + ")\n"); int x = isTable(path, scanFs); if (x > 0) { // System.err.println(" traceTable(" + path.toString() + ")"); traceTableInfo(path, scanFs); } else if (x == 0) { // iterate over each table // FileStatus[] fslist = scanFs.listStatus(path); // System.err.println(" => " + fslist.length + " subdirs"); for (FileStatus fstat : scanFs.listStatus(path)) { n = fstat.getPath(); curPath = n.toUri().getPath(); // System.err.println(" traceSubDir(" + curPath + ")"); if (n == path) { continue; } try { if (isTable(n, scanFs) > 0) { traceTableInfo(n, scanFs); } else if (scanFs.listStatus(n).length > 0 || scanFs.getContentSummary(n).getLength() > 0) { scanPath(n, scanFs); } else { logger.info("* scanPath() size = 0: " + curPath); } } catch (AccessControlException e) { logger.error("* scanPath(e) Permission denied. Cannot access: " + curPath + " owner:" + fstat.getOwner() + " group: " + fstat.getGroup() + "with current user " + UserGroupInformation.getCurrentUser()); // System.err.println(e); continue; } // catch } // end of for } // end else }