List of usage examples for org.apache.hadoop.fs.FileStatus#getAccessTime()
public long getAccessTime()
From source file:org.apache.ambari.view.filebrowser.HdfsApi.java
License:Apache License
/** * Converts a Hadoop <code>FileStatus</code> object into a JSON array object. * It replaces the <code>SCHEME://HOST:PORT</code> of the path with the * specified URL./* ww w .j av a 2 s. c om*/ * <p/> * * @param status * Hadoop file status. * @return The JSON representation of the file status. */ public Map<String, Object> fileStatusToJSON(FileStatus status) { Map<String, Object> json = new LinkedHashMap<String, Object>(); json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString()); json.put("replication", status.getReplication()); json.put("isDirectory", status.isDirectory()); json.put("len", status.getLen()); json.put("owner", status.getOwner()); json.put("group", status.getGroup()); json.put("permission", permissionToString(status.getPermission())); json.put("accessTime", status.getAccessTime()); json.put("modificationTime", status.getModificationTime()); json.put("blockSize", status.getBlockSize()); json.put("replication", status.getReplication()); json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi)); json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi)); json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi)); return json; }
From source file:org.apache.ambari.view.hive.utils.HdfsApi.java
License:Apache License
/** * Converts a Hadoop <code>FileStatus</code> object into a JSON array object. * It replaces the <code>SCHEME://HOST:PORT</code> of the path with the * specified URL./*from w ww . j a va2s . c om*/ * <p/> * * @param status * Hadoop file status. * @return The JSON representation of the file status. */ public static Map<String, Object> fileStatusToJSON(FileStatus status) { Map<String, Object> json = new LinkedHashMap<String, Object>(); json.put("path", status.getPath().toString()); json.put("isDirectory", status.isDirectory()); json.put("len", status.getLen()); json.put("owner", status.getOwner()); json.put("group", status.getGroup()); json.put("permission", permissionToString(status.getPermission())); json.put("accessTime", status.getAccessTime()); json.put("modificationTime", status.getModificationTime()); json.put("blockSize", status.getBlockSize()); json.put("replication", status.getReplication()); return json; }
From source file:org.apache.ambari.view.utils.hdfs.HdfsApi.java
License:Apache License
/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON-style map.
 * The <code>SCHEME://HOST:PORT</code> prefix is stripped from the path.
 *
 * @param status Hadoop file status.
 * @return ordered map representation of the file status.
 */
public Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString());
    json.put("replication", status.getReplication());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    // FIX: "replication" was previously put twice; on a LinkedHashMap the second put
    // neither changes the value nor the iteration order, so removing it is a pure cleanup.
    json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi));
    json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi));
    json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi));
    return json;
}
From source file:org.apache.drill.exec.planner.sql.handlers.ShowFileHandler.java
License:Apache License
@Override public PhysicalPlan getPlan(SqlNode sqlNode) throws ValidationException, RelConversionException, IOException { SqlIdentifier from = ((SqlShowFiles) sqlNode).getDb(); DrillFileSystem fs = null;/*from w w w .ja v a 2s .com*/ String defaultLocation = null; String fromDir = "./"; SchemaPlus defaultSchema = config.getConverter().getDefaultSchema(); SchemaPlus drillSchema = defaultSchema; // Show files can be used without from clause, in which case we display the files in the default schema if (from != null) { // We are not sure if the full from clause is just the schema or includes table name, // first try to see if the full path specified is a schema drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names); if (drillSchema == null) { // Entire from clause is not a schema, try to obtain the schema without the last part of the specified clause. drillSchema = SchemaUtilites.findSchema(defaultSchema, from.names.subList(0, from.names.size() - 1)); fromDir = fromDir + from.names.get((from.names.size() - 1)); } if (drillSchema == null) { throw UserException.validationError().message("Invalid FROM/IN clause [%s]", from.toString()) .build(logger); } } WorkspaceSchema wsSchema; try { wsSchema = (WorkspaceSchema) drillSchema.unwrap(AbstractSchema.class).getDefaultSchema(); } catch (ClassCastException e) { throw UserException.validationError().message( "SHOW FILES is supported in workspace type schema only. 
Schema [%s] is not a workspace schema.", SchemaUtilites.getSchemaPath(drillSchema)).build(logger); } // Get the file system object fs = wsSchema.getFS(); // Get the default path defaultLocation = wsSchema.getDefaultLocation(); List<ShowFilesCommandResult> rows = new ArrayList<>(); for (FileStatus fileStatus : fs.list(false, new Path(defaultLocation, fromDir))) { ShowFilesCommandResult result = new ShowFilesCommandResult(fileStatus.getPath().getName(), fileStatus.isDir(), !fileStatus.isDir(), fileStatus.getLen(), fileStatus.getOwner(), fileStatus.getGroup(), fileStatus.getPermission().toString(), fileStatus.getAccessTime(), fileStatus.getModificationTime()); rows.add(result); } return DirectPlan.createDirectPlan(context.getCurrentEndpoint(), rows.iterator(), ShowFilesCommandResult.class); }
From source file:org.apache.falcon.hadoop.JailedFileSystem.java
License:Apache License
@Override public FileStatus[] listStatus(Path f) throws IOException { FileStatus[] fileStatuses = localFS.listStatus(toLocalPath(f)); if (fileStatuses == null || fileStatuses.length == 0) { return fileStatuses; } else {// w w w .j a v a 2 s . c om FileStatus[] jailFileStatuses = new FileStatus[fileStatuses.length]; for (int index = 0; index < fileStatuses.length; index++) { FileStatus status = fileStatuses[index]; jailFileStatuses[index] = new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(), status.getBlockSize(), status.getModificationTime(), status.getAccessTime(), status.getPermission(), status.getOwner(), status.getGroup(), fromLocalPath(status.getPath()).makeQualified(this.getUri(), this.getWorkingDirectory())); } return jailFileStatuses; } }
From source file:org.apache.falcon.hadoop.JailedFileSystem.java
License:Apache License
@Override public FileStatus getFileStatus(Path f) throws IOException { FileStatus status = localFS.getFileStatus(toLocalPath(f)); if (status == null) { return null; }// w ww . ja v a 2 s . c o m return new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(), status.getBlockSize(), status.getModificationTime(), status.getAccessTime(), status.getPermission(), status.getOwner(), status.getGroup(), fromLocalPath(status.getPath()).makeQualified(this.getUri(), this.getWorkingDirectory())); }
From source file:org.apache.hive.common.util.MockFileSystem.java
License:Apache License
public void touch(MockFile file) { if (fileStatusMap.containsKey(file)) { FileStatus fileStatus = fileStatusMap.get(file); FileStatus fileStatusNew = new FileStatus(fileStatus.getLen(), fileStatus.isDirectory(), fileStatus.getReplication(), fileStatus.getBlockSize(), fileStatus.getModificationTime() + 1, fileStatus.getAccessTime(), fileStatus.getPermission(), fileStatus.getOwner(), fileStatus.getGroup(), fileStatus.getPath()); fileStatusMap.put(file, fileStatusNew); }//w w w . ja v a 2 s.com }
From source file:org.apache.ignite.hadoop.fs.IgniteHadoopIgfsSecondaryFileSystem.java
License:Apache License
/** {@inheritDoc} */
@Override
public IgfsFile info(final IgfsPath path) {
    try {
        // Resolve the IGFS path against the secondary Hadoop file system.
        final FileStatus status = fileSys.getFileStatus(convert(path));

        if (status == null)
            return null;

        final Map<String, String> props = properties(status);

        // Adapt the Hadoop FileStatus to the IgfsFile interface via an anonymous
        // view; the closure captures 'path', 'status' and 'props'.
        return new IgfsFile() {
            @Override public IgfsPath path() {
                return path;
            }

            @Override public boolean isFile() {
                return status.isFile();
            }

            @Override public boolean isDirectory() {
                return status.isDirectory();
            }

            @Override public int blockSize() {
                // By convention directory has blockSize == 0, while file has blockSize > 0:
                return isDirectory() ? 0 : (int) status.getBlockSize();
            }

            @Override public long groupBlockSize() {
                return status.getBlockSize();
            }

            @Override public long accessTime() {
                return status.getAccessTime();
            }

            @Override public long modificationTime() {
                return status.getModificationTime();
            }

            // Strict lookup: a missing property is an error.
            @Override public String property(String name) throws IllegalArgumentException {
                String val = props.get(name);

                if (val == null)
                    throw new IllegalArgumentException("File property not found [path=" + path + ", name=" + name + ']');

                return val;
            }

            // Lenient lookup: a missing property falls back to the supplied default.
            @Nullable @Override public String property(String name, @Nullable String dfltVal) {
                String val = props.get(name);

                return val == null ? dfltVal : val;
            }

            @Override public long length() {
                return status.getLen();
            }

            /** {@inheritDoc} */
            @Override public Map<String, String> properties() {
                return props;
            }
        };
    }
    catch (FileNotFoundException ignore) {
        // A missing file is reported as null rather than an exception -- presumably
        // the IgfsFile lookup contract; TODO confirm against the interface docs.
        return null;
    }
    catch (IOException e) {
        throw handleSecondaryFsError(e, "Failed to get file status [path=" + path + "]");
    }
}
From source file:org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.java
License:Apache License
/**
 * Converts a secondary-file-system {@code FileStatus} into one whose path
 * belongs to the primary file system; {@code null} passes through unchanged.
 *
 * @param status Secondary file system status (may be {@code null}).
 * @return Equivalent primary file system status, or {@code null}.
 */
@SuppressWarnings("deprecation")
private FileStatus toPrimary(FileStatus status) {
    if (status == null)
        return null;

    // Only the path is translated; every other attribute is copied through.
    return new FileStatus(status.getLen(), status.isDir(), status.getReplication(), status.getBlockSize(),
        status.getModificationTime(), status.getAccessTime(), status.getPermission(), status.getOwner(),
        status.getGroup(), toPrimary(status.getPath()));
}
From source file:org.apache.ignite.hadoop.fs.v2.IgniteHadoopFileSystem.java
License:Apache License
/**
 * Converts a secondary-file-system {@code FileStatus} into one whose path
 * belongs to the primary file system; {@code null} passes through unchanged.
 *
 * @param status Secondary file system status (may be {@code null}).
 * @return Equivalent primary file system status, or {@code null}.
 */
private FileStatus toPrimary(FileStatus status) {
    if (status == null)
        return null;

    // Only the path is translated; every other attribute is copied through.
    return new FileStatus(status.getLen(), status.isDirectory(), status.getReplication(), status.getBlockSize(),
        status.getModificationTime(), status.getAccessTime(), status.getPermission(), status.getOwner(),
        status.getGroup(), toPrimary(status.getPath()));
}