List of usage examples for org.apache.hadoop.fs FileStatus getAccessTime
public long getAccessTime()
From source file:com.pivotal.hawq.mapreduce.parquet.HAWQParquetInputFormat.java
License:Apache License
@Override protected List<FileStatus> listStatus(JobContext jobContext) throws IOException { List<FileStatus> result = Lists.newArrayList(); for (HAWQFileStatus hawqFileStatus : hawqFileStatuses) { if (hawqFileStatus.getFileLength() == 0) continue; // skip empty file Path path = new Path(hawqFileStatus.getFilePath()); FileSystem fs = path.getFileSystem(jobContext.getConfiguration()); FileStatus dfsStat = fs.getFileStatus(path); // rewrite file length because HAWQ records the logicalEOF of file, which may // be smaller than the file's actual EOF FileStatus hawqStat = new FileStatus(hawqFileStatus.getFileLength(), // rewrite to logicalEOF dfsStat.isDirectory(), dfsStat.getReplication(), dfsStat.getBlockSize(), dfsStat.getModificationTime(), dfsStat.getAccessTime(), dfsStat.getPermission(), dfsStat.getOwner(), dfsStat.getGroup(), dfsStat.getPath()); result.add(hawqStat);// w w w. jav a 2 s. co m } return result; }
From source file:com.rim.logdriver.admin.HFind.java
License:Apache License
@Override public int run(String[] args) throws Exception { final long startTime = System.currentTimeMillis(); int i = 0;// ww w . j a v a 2 s . com while (i < args.length) { if (args[i].startsWith("-")) { break; } Path path = new Path(args[i]); FileSystem fs = path.getFileSystem(getConf()); FileStatus[] fileStatuses = fs.globStatus(path); if (fileStatuses != null) { for (FileStatus fileStatus : fileStatuses) { paths.add(fileStatus.getPath()); fileStatusCache.put(fileStatus.getPath(), fileStatus); } } i++; } while (i < args.length) { // -print action if ("-print".equals(args[i])) { actions.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { System.out.println(fileStatus.getPath()); return true; } }); } // -delete action if ("-delete".equals(args[i])) { actions.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { try { FileSystem fs = fileStatus.getPath().getFileSystem(getConf()); if (!fileStatus.isDir() || fs.listStatus(fileStatus.getPath()).length == 0) { return fs.delete(fileStatus.getPath(), true); } } catch (IOException e) { e.printStackTrace(); } return false; } }); } // -atime test else if ("-atime".equals(args[i])) { i++; if (i >= args.length) { System.err.println("Missing arguement for -atime"); System.exit(1); } String t = args[i]; if (t.charAt(0) == '+') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getAccessTime()) / (24 * 60 * 60 * 1000) > time) { return true; } else { return false; } } }); } else if (t.charAt(0) == '-') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getAccessTime()) / (24 * 60 * 60 * 1000) < time) { return true; } else { return false; } } }); } else { final long time = Long.parseLong(t); tests.add(new FileStatusFilter() { 
@Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getAccessTime()) / (24 * 60 * 60 * 1000) == time) { return true; } else { return false; } } }); } } // -mtime test else if ("-mtime".equals(args[i])) { i++; if (i >= args.length) { System.err.println("Missing arguement for -mtime"); System.exit(1); } String t = args[i]; if (t.charAt(0) == '+') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getModificationTime()) / (24 * 60 * 60 * 1000) > time) { return true; } else { return false; } } }); } else if (t.charAt(0) == '-') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getModificationTime()) / (24 * 60 * 60 * 1000) < time) { return true; } else { return false; } } }); } else { final long time = Long.parseLong(t); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getModificationTime()) / (24 * 60 * 60 * 1000) == time) { return true; } else { return false; } } }); } } // -amin test else if ("-amin".equals(args[i])) { i++; if (i >= args.length) { System.err.println("Missing arguement for -amin"); System.exit(1); } String t = args[i]; if (t.charAt(0) == '+') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getAccessTime()) / (60 * 1000) > time) { return true; } else { return false; } } }); } else if (t.charAt(0) == '-') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getAccessTime()) / (60 * 1000) < time) { return true; } else { return false; } } }); } else { final long time = 
Long.parseLong(t); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getAccessTime()) / (60 * 1000) == time) { return true; } else { return false; } } }); } } // -mmin test else if ("-mmin".equals(args[i])) { i++; if (i >= args.length) { System.err.println("Missing arguement for -mmin"); System.exit(1); } String t = args[i]; if (t.charAt(0) == '+') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getModificationTime()) / (60 * 1000) > time) { return true; } else { return false; } } }); } else if (t.charAt(0) == '-') { final long time = Long.parseLong(t.substring(1)); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getModificationTime()) / (60 * 1000) < time) { return true; } else { return false; } } }); } else { final long time = Long.parseLong(t); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if ((startTime - fileStatus.getModificationTime()) / (60 * 1000) == time) { return true; } else { return false; } } }); } } // -regex test else if ("-regex".equals(args[i])) { i++; if (i >= args.length) { System.err.println("Missing arguement for -regex"); System.exit(1); } final Pattern p = Pattern.compile(args[i]); tests.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { if (p.matcher(fileStatus.getPath().toString()).matches()) { return true; } else { return false; } } }); } i++; } if (actions.size() == 0) { actions.add(new FileStatusFilter() { @Override public boolean accept(FileStatus fileStatus) { System.out.println(fileStatus.getPath()); return true; } }); } search(); return 0; }
From source file:com.streamsets.pipeline.stage.origin.hdfs.spooler.HdfsFile.java
License:Apache License
@SuppressWarnings("unchecked") public Map<String, Object> getFileMetadata() throws IOException { FileStatus file = fs.getFileStatus(filePath); Map<String, Object> metadata = new HashMap<>(); metadata.put(HeaderAttributeConstants.FILE_NAME, file.getPath().getName()); metadata.put(HeaderAttributeConstants.FILE, file.getPath().toUri().getPath()); metadata.put(HeaderAttributeConstants.LAST_MODIFIED_TIME, file.getModificationTime()); metadata.put(HeaderAttributeConstants.LAST_ACCESS_TIME, file.getAccessTime()); metadata.put(HeaderAttributeConstants.IS_DIRECTORY, file.isDirectory()); metadata.put(HeaderAttributeConstants.IS_SYMBOLIC_LINK, file.isSymlink()); metadata.put(HeaderAttributeConstants.SIZE, file.getLen()); metadata.put(HeaderAttributeConstants.OWNER, file.getOwner()); metadata.put(HeaderAttributeConstants.GROUP, file.getGroup()); metadata.put(HeaderAttributeConstants.BLOCK_SIZE, file.getBlockSize()); metadata.put(HeaderAttributeConstants.REPLICATION, file.getReplication()); metadata.put(HeaderAttributeConstants.IS_ENCRYPTED, file.isEncrypted()); FsPermission permission = file.getPermission(); if (permission != null) { metadata.put(PERMISSIONS, permission.toString()); }//w ww .j a v a2s . c om return metadata; }
From source file:eagle.security.hdfs.entity.FileStatusEntity.java
License:Apache License
/**
 * Copies the attributes of an HDFS {@link FileStatus} into this entity.
 *
 * @param status the HDFS file status to copy from
 * @throws IOException if resolving the symlink target fails
 */
public FileStatusEntity(FileStatus status) throws IOException {
    // NOTE: status.getPath() is not stored (the assignment was commented out
    // upstream).
    this.length = status.getLen();
    this.isdir = status.isDirectory();
    this.block_replication = status.getReplication();
    this.blocksize = status.getBlockSize();
    this.modification_time = status.getModificationTime();
    this.access_time = status.getAccessTime();
    this.permission = status.getPermission();
    this.owner = status.getOwner();
    this.group = status.getGroup();
    // getSymlink() is only valid when the status actually is a symlink.
    if (status.isSymlink()) {
        this.symlink = status.getSymlink();
    }
}
From source file:edu.umn.cs.spatialHadoop.visualization.HadoopvizServer.java
License:Open Source License
/** * Lists the contents of a directory//from www . ja va2s. co m * @param request * @param response */ private void handleListFiles(HttpServletRequest request, HttpServletResponse response) { try { String pathStr = request.getParameter("path"); Path path = new Path(pathStr == null || pathStr.isEmpty() ? "/" : pathStr); FileSystem fs = path.getFileSystem(commonParams); FileStatus[] fileStatuses = fs.listStatus(path, SpatialSite.NonHiddenFileFilter); Arrays.sort(fileStatuses, new Comparator<FileStatus>() { @Override public int compare(FileStatus o1, FileStatus o2) { if (o1.isDirectory() && o2.isFile()) return -1; if (o1.isFile() && o2.isDirectory()) return 1; return o1.getPath().getName().toLowerCase().compareTo(o2.getPath().getName().toLowerCase()); } }); response.setContentType("application/json;charset=utf-8"); response.setStatus(HttpServletResponse.SC_OK); PrintWriter out = response.getWriter(); out.print("{\"FileStatuses\":{"); if (pathStr.endsWith("/")) { pathStr = pathStr.substring(0, pathStr.length() - 1); } out.printf("\"BaseDir\":\"%s\",", pathStr); if (path.getParent() != null) out.printf("\"ParentDir\":\"%s\",", path.getParent()); out.print("\"FileStatus\":["); for (int i = 0; i < fileStatuses.length; i++) { FileStatus fileStatus = fileStatuses[i]; if (i != 0) out.print(','); String filename = fileStatus.getPath().getName(); int idot = filename.lastIndexOf('.'); String extension = idot == -1 ? 
"" : filename.substring(idot + 1); out.printf( "{\"accessTime\":%d,\"blockSize\":%d,\"childrenNum\":%d,\"fileId\":%d," + "\"group\":\"%s\",\"length\":%d,\"modificationTime\":%d," + "\"owner\":\"%s\",\"pathSuffix\":\"%s\",\"permission\":\"%s\"," + "\"replication\":%d,\"storagePolicy\":%d,\"type\":\"%s\",\"extension\":\"%s\"}", fileStatus.getAccessTime(), fileStatus.getBlockSize(), 0, 0, fileStatus.getGroup(), fileStatus.getLen(), fileStatus.getModificationTime(), fileStatus.getOwner(), fileStatus.getPath().getName(), fileStatus.getPermission(), fileStatus.getReplication(), 0, fileStatus.isDirectory() ? "DIRECTORY" : "FILE", extension.toLowerCase()); } out.print("]}"); // Check if there is an image or master file FileStatus[] metaFiles = fs.listStatus(path, new PathFilter() { @Override public boolean accept(Path path) { return path.getName().startsWith("_master") || path.getName().equals("_data.png"); } }); for (FileStatus metaFile : metaFiles) { String metaFileName = metaFile.getPath().getName(); if (metaFileName.startsWith("_master")) { out.printf(",\"MasterPath\":\"%s\"", metaFileName); String shape = OperationsParams.detectShape(fileStatuses[0].getPath(), commonParams); if (shape != null) out.printf(",\"Shape\":\"%s\"", shape); } else if (metaFileName.equals("_data.png")) out.printf(",\"ImagePath\":\"%s\"", metaFileName); } out.print("}"); out.close(); } catch (Exception e) { System.out.println("error happened"); e.printStackTrace(); try { e.printStackTrace(response.getWriter()); } catch (IOException ioe) { ioe.printStackTrace(); e.printStackTrace(); } response.setContentType("text/plain;charset=utf-8"); response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR); } }
From source file:fuse4j.hadoopfs.HdfsClientImpl.java
License:Apache License
/** * getFileInfo()/* w w w. ja va2 s.com*/ */ @Override public HdfsFileAttr getFileInfo(int uid, String path) { FileSystem dfs = null; try { dfs = getDfs(uid); FileStatus dfsStat = dfs.getFileStatus(new Path(path)); final boolean directory = dfsStat.isDir(); final int inode = 0; final int mode = dfsStat.getPermission().toShort(); final int uuid = userCache.getUid(dfsStat.getOwner()); final int gid = 0; // TODO: per-file block-size can't be retrieved correctly, // using default block size for now. final long size = dfsStat.getLen(); final int blocks = (int) Math.ceil(((double) size) / dfs.getDefaultBlockSize()); // modification/create-times are the same as access-time final int modificationTime = (int) (dfsStat.getModificationTime() / 1000); final int accessTime = (int) (dfsStat.getAccessTime() / 1000); HdfsFileAttr hdfsFileAttr = new HdfsFileAttr(directory, inode, mode, uuid, gid, 1); hdfsFileAttr.setSize(size, blocks); hdfsFileAttr.setTime(modificationTime, modificationTime, accessTime); // TODO Hack to set inode; hdfsFileAttr.inode = hdfsFileAttr.hashCode(); return hdfsFileAttr; } catch (Exception ioe) { // fall through to failure } // failed return null; }
From source file:gobblin.util.filesystem.InstrumentedFileSystemUtils.java
License:Apache License
/** * Replace the scheme of the input {@link FileStatus} if it matches the string to replace. *///from w ww .j av a2 s . c om public static FileStatus replaceScheme(FileStatus st, String replace, String replacement) { if (replace != null && replace.equals(replacement)) { return st; } try { return new FileStatus(st.getLen(), st.isDir(), st.getReplication(), st.getBlockSize(), st.getModificationTime(), st.getAccessTime(), st.getPermission(), st.getOwner(), st.getGroup(), st.isSymlink() ? st.getSymlink() : null, replaceScheme(st.getPath(), replace, replacement)); } catch (IOException ioe) { throw new RuntimeException(ioe); } }
From source file:hdfs.jsr203.attribute.HadoopFileAttributeView.java
License:Apache License
Object attribute(AttrID id, FileStatus hfas) { switch (id) { case accessTime: return hfas.getAccessTime(); case blockSize: return hfas.getBlockSize(); case group:/* www .ja v a 2 s . co m*/ return hfas.getGroup(); case len: return hfas.getLen(); case modificationTime: return hfas.getModificationTime(); case owner: return hfas.getOwner(); case replication: return hfas.getReplication(); case isDirectory: return hfas.isDirectory(); // TODO enable encryption //case isEncrypted: // return hfas.isEncrypted(); case isFile: return hfas.isFile(); case isSymLink: return hfas.isSymlink(); } return null; }
From source file:hdfs.jsr203.HadoopFileSystem.java
License:Apache License
public void setTimes(byte[] bs, FileTime mtime, FileTime atime, FileTime ctime) throws IOException { org.apache.hadoop.fs.Path hp = new HadoopPath(this, bs).getRawResolvedPath(); long mtime_millis = 0; long atime_millis = 0; // Get actual value if (mtime == null || atime == null) { FileStatus stat = this.fs.getFileStatus(hp); mtime_millis = stat.getModificationTime(); atime_millis = stat.getAccessTime(); }/*from w ww.j a v a 2 s .c om*/ if (mtime != null) { mtime_millis = mtime.toMillis(); } if (atime != null) { atime_millis = atime.toMillis(); } this.fs.setTimes(hp, mtime_millis, atime_millis); }
From source file:io.apigee.lembos.node.types.DistributedCacheWrap.java
License:Apache License
/**
 * Java wrapper for {@link DistributedCache#getFileStatus(Configuration, URI)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 *
 * @return a read-only JavaScript object describing the file status
 */
@JSStaticFunction
public static Object getFileStatus(final Context ctx, final Scriptable thisObj, final Object[] args,
        final Function func) {
    final Object confArg = args.length >= 1 ? args[0] : Undefined.instance;
    final Object uriArg = args.length >= 2 ? args[1] : Undefined.instance;

    // Validate arity and argument types before touching Hadoop.
    if (args.length < 2) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.TWO_ARGS_EXPECTED);
    } else if (!JavaScriptUtils.isDefined(confArg)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.FIRST_ARG_REQUIRED);
    } else if (!JavaScriptUtils.isDefined(uriArg)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.SECOND_ARG_REQUIRED);
    } else if (!(confArg instanceof ConfigurationWrap)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.FIRST_ARG_MUST_BE_CONF);
    }

    final URI hdfsUri = URI.create(uriArg.toString());
    final FileStatus status;
    try {
        status = DistributedCache.getFileStatus(((ConfigurationWrap) confArg).getConf(), hdfsUri);
    } catch (IOException e) {
        throw Utils.makeError(ctx, thisObj, e.getMessage());
    }
    if (status == null) {
        throw Utils.makeError(ctx, thisObj, "Unable to get file status for HDFS uri: " + hdfsUri.toString());
    }

    // Mirror the FileStatus fields onto a read-only JS object.
    final Scriptable jsStatus = ctx.newObject(thisObj);
    ScriptableObject.defineProperty(jsStatus, "accessTime", status.getAccessTime(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "blockSize", status.getBlockSize(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "group", status.getGroup(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "len", status.getLen(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "modificationTime", status.getModificationTime(),
            ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "owner", status.getOwner(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "path", status.getPath().toString(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "permission", status.getPermission().toString(),
            ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "replication", status.getReplication(),
            ScriptableObject.READONLY);
    return jsStatus;
}