List of usage examples for org.apache.hadoop.fs FileStatus getModificationTime
public long getModificationTime()
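FileStatus#getModificationTime() returns the file's modification time in milliseconds since January 1, 1970 UTC. Before the real-world examples below, a minimal self-contained sketch of the call; the path is a placeholder, not a file that is assumed to exist:

import java.time.Instant;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModificationTimeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical path; substitute a file that exists on your cluster
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));
        long mtimeMillis = status.getModificationTime(); // milliseconds since the epoch
        System.out.println("Last modified: " + Instant.ofEpochMilli(mtimeMillis));
    }
}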
From source file:edu.umn.cs.spatialHadoop.visualization.HadoopvizServer.java
License:Open Source License
/**
 * Lists the contents of a directory.
 * @param request
 * @param response
 */
private void handleListFiles(HttpServletRequest request, HttpServletResponse response) {
    try {
        String pathStr = request.getParameter("path");
        // Normalize here so later uses of pathStr cannot hit a NullPointerException
        if (pathStr == null || pathStr.isEmpty())
            pathStr = "/";
        Path path = new Path(pathStr);
        FileSystem fs = path.getFileSystem(commonParams);
        FileStatus[] fileStatuses = fs.listStatus(path, SpatialSite.NonHiddenFileFilter);
        // Directories first, then files; each group sorted by name (case-insensitive)
        Arrays.sort(fileStatuses, new Comparator<FileStatus>() {
            @Override
            public int compare(FileStatus o1, FileStatus o2) {
                if (o1.isDirectory() && o2.isFile())
                    return -1;
                if (o1.isFile() && o2.isDirectory())
                    return 1;
                return o1.getPath().getName().toLowerCase()
                        .compareTo(o2.getPath().getName().toLowerCase());
            }
        });
        response.setContentType("application/json;charset=utf-8");
        response.setStatus(HttpServletResponse.SC_OK);
        PrintWriter out = response.getWriter();
        out.print("{\"FileStatuses\":{");
        if (pathStr.endsWith("/")) {
            pathStr = pathStr.substring(0, pathStr.length() - 1);
        }
        out.printf("\"BaseDir\":\"%s\",", pathStr);
        if (path.getParent() != null)
            out.printf("\"ParentDir\":\"%s\",", path.getParent());
        out.print("\"FileStatus\":[");
        for (int i = 0; i < fileStatuses.length; i++) {
            FileStatus fileStatus = fileStatuses[i];
            if (i != 0)
                out.print(',');
            String filename = fileStatus.getPath().getName();
            int idot = filename.lastIndexOf('.');
            String extension = idot == -1 ? "" : filename.substring(idot + 1);
            // Emit one WebHDFS-style FileStatus object; modificationTime is
            // FileStatus#getModificationTime(), in milliseconds since the epoch
            out.printf(
                    "{\"accessTime\":%d,\"blockSize\":%d,\"childrenNum\":%d,\"fileId\":%d,"
                            + "\"group\":\"%s\",\"length\":%d,\"modificationTime\":%d,"
                            + "\"owner\":\"%s\",\"pathSuffix\":\"%s\",\"permission\":\"%s\","
                            + "\"replication\":%d,\"storagePolicy\":%d,\"type\":\"%s\",\"extension\":\"%s\"}",
                    fileStatus.getAccessTime(), fileStatus.getBlockSize(), 0, 0, fileStatus.getGroup(),
                    fileStatus.getLen(), fileStatus.getModificationTime(), fileStatus.getOwner(),
                    fileStatus.getPath().getName(), fileStatus.getPermission(), fileStatus.getReplication(), 0,
                    fileStatus.isDirectory() ? "DIRECTORY" : "FILE", extension.toLowerCase());
        }
        out.print("]}");
        // Check if there is an image or master file
        FileStatus[] metaFiles = fs.listStatus(path, new PathFilter() {
            @Override
            public boolean accept(Path p) {
                return p.getName().startsWith("_master") || p.getName().equals("_data.png");
            }
        });
        for (FileStatus metaFile : metaFiles) {
            String metaFileName = metaFile.getPath().getName();
            if (metaFileName.startsWith("_master")) {
                out.printf(",\"MasterPath\":\"%s\"", metaFileName);
                String shape = OperationsParams.detectShape(fileStatuses[0].getPath(), commonParams);
                if (shape != null)
                    out.printf(",\"Shape\":\"%s\"", shape);
            } else if (metaFileName.equals("_data.png")) {
                out.printf(",\"ImagePath\":\"%s\"", metaFileName);
            }
        }
        out.print("}");
        out.close();
    } catch (Exception e) {
        e.printStackTrace();
        // Set the error status before writing the body, otherwise the
        // response may already be committed with 200 OK
        response.setContentType("text/plain;charset=utf-8");
        response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        try {
            e.printStackTrace(response.getWriter());
        } catch (IOException ioe) {
            ioe.printStackTrace();
        }
    }
}
From source file:es.tid.cosmos.platform.injection.server.HadoopSshFile.java
License:Open Source License
@Override
public long getLastModified() {
    try {
        FileStatus fileStatus = this.hadoopFS.getFileStatus(this.hadoopPath);
        // FileStatus#getModificationTime() already returns milliseconds since
        // the epoch, which is the unit getLastModified() is expected to report
        return fileStatus.getModificationTime();
    } catch (IOException e) {
        LOG.error(e.getMessage(), e);
        // Mirror java.io.File#lastModified(): report 0L when the status cannot be read
        return 0L;
    }
}
From source file:eu.stratosphere.yarn.Utils.java
License:Apache License
public static void registerLocalResource(FileSystem fs, Path remoteRsrcPath, LocalResource localResource)
        throws IOException {
    FileStatus jarStat = fs.getFileStatus(remoteRsrcPath);
    localResource.setResource(ConverterUtils.getYarnUrlFromURI(remoteRsrcPath.toUri()));
    localResource.setSize(jarStat.getLen());
    // YARN verifies this timestamp against the remote file before localizing
    // it, so it must match the file's actual modification time exactly
    localResource.setTimestamp(jarStat.getModificationTime());
    localResource.setType(LocalResourceType.FILE);
    localResource.setVisibility(LocalResourceVisibility.PUBLIC);
}
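A hedged sketch of how a caller might use this helper when assembling a container launch context; the class name, staging path, and resource key are assumptions for illustration, not part of the Stratosphere source:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.util.Records;

public class RegisterLocalResourceExample {
    public static Map<String, LocalResource> buildLocalResources(Configuration conf) throws IOException {
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical staging path; in practice the client uploads the jar here first
        Path remoteJarPath = new Path("hdfs:///apps/stratosphere/app.jar");
        LocalResource appJar = Records.newRecord(LocalResource.class);
        Utils.registerLocalResource(fs, remoteJarPath, appJar);
        // The container sees the resource under the map key, here "app.jar";
        // the map would later be passed to ContainerLaunchContext#setLocalResources
        Map<String, LocalResource> localResources = new HashMap<>();
        localResources.put("app.jar", appJar);
        return localResources;
    }
}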
From source file:fr.ens.biologie.genomique.eoulsan.data.protocols.PathDataProtocol.java
License:LGPL
@Override
public DataFileMetadata getMetadata(final DataFile src) throws IOException {
    if (!exists(src, true)) {
        throw new FileNotFoundException("File not found: " + src);
    }
    final Path path = getPath(src);
    final FileStatus status = path.getFileSystem(this.conf).getFileStatus(path);
    final SimpleDataFileMetadata result = new SimpleDataFileMetadata();
    result.setContentLength(status.getLen());
    result.setLastModified(status.getModificationTime());
    result.setDataFormat(DataFormatRegistry.getInstance().getDataFormatFromFilename(src.getName()));
    final CompressionType ct = CompressionType.getCompressionTypeByFilename(src.getSource());
    if (ct != null) {
        result.setContentEncoding(ct.getContentEncoding());
    }
    if (status.isDirectory()) {
        result.setDirectory(true);
    }
    if (status.isSymlink()) {
        result.setSymbolicLink(new DataFile(status.getSymlink().toUri()));
    }
    return result;
}
From source file:fuse4j.hadoopfs.HdfsClientImpl.java
License:Apache License
/**
 * getFileInfo()
 */
@Override
public HdfsFileAttr getFileInfo(int uid, String path) {
    FileSystem dfs = null;
    try {
        dfs = getDfs(uid);
        FileStatus dfsStat = dfs.getFileStatus(new Path(path));
        final boolean directory = dfsStat.isDir();
        final int inode = 0;
        final int mode = dfsStat.getPermission().toShort();
        final int uuid = userCache.getUid(dfsStat.getOwner());
        final int gid = 0;

        // TODO: per-file block-size can't be retrieved correctly,
        // using default block size for now.
        final long size = dfsStat.getLen();
        final int blocks = (int) Math.ceil(((double) size) / dfs.getDefaultBlockSize());

        // Hadoop reports milliseconds since the epoch; FUSE attributes use
        // whole seconds. The creation time is reported as the modification time.
        final int modificationTime = (int) (dfsStat.getModificationTime() / 1000);
        final int accessTime = (int) (dfsStat.getAccessTime() / 1000);

        HdfsFileAttr hdfsFileAttr = new HdfsFileAttr(directory, inode, mode, uuid, gid, 1);
        hdfsFileAttr.setSize(size, blocks);
        hdfsFileAttr.setTime(modificationTime, modificationTime, accessTime);

        // TODO: hack to set inode
        hdfsFileAttr.inode = hdfsFileAttr.hashCode();
        return hdfsFileAttr;
    } catch (Exception ioe) {
        // fall through to failure
    }
    // failed
    return null;
}
From source file:fuse4j.hadoopfs.HdfsClientReal.java
License:Apache License
/**
 * getFileInfo()
 */
public HdfsFileAttr getFileInfo(String path) {
    try {
        FileStatus dfsStat = dfs.getFileStatus(new Path(path));
        final boolean directory = dfsStat.isDirectory();
        final int inode = 0;
        final int mode = dfsStat.getPermission().toShort();
        final int uid = userCache.getUid(dfsStat.getOwner());
        final int gid = 0;

        // TODO: per-file block-size can't be retrieved correctly,
        // using default block size for now.
        final long size = dfsStat.getLen();
        final int blocks = (int) Math.ceil(((double) size) / dfs.getDefaultBlockSize());

        // Hadoop reports milliseconds since the epoch; FUSE attributes use
        // whole seconds. The same value serves as modification, creation,
        // and access time here.
        final int modificationTime = (int) (dfsStat.getModificationTime() / 1000);

        HdfsFileAttr hdfsFileAttr = new HdfsFileAttr(directory, inode, mode, uid, gid, 1);
        hdfsFileAttr.setSize(size, blocks);
        hdfsFileAttr.setTime(modificationTime);

        // TODO: hack to set inode
        hdfsFileAttr.inode = hdfsFileAttr.hashCode();
        return hdfsFileAttr;
    } catch (IOException ioe) {
        // fall through to failure
    }
    // failed
    return null;
}
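A caveat that applies to both FUSE adapters above: casting epoch seconds to an int overflows in January 2038. A hedged sketch of a clamped conversion, reusing the dfsStat variable from the example above and assuming the attribute structure still requires an int:

long mtimeSeconds = dfsStat.getModificationTime() / 1000L; // keep the division in long arithmetic
int mtimeForAttr = (int) Math.min(mtimeSeconds, Integer.MAX_VALUE); // clamp instead of silently wrapping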
From source file:gobblin.compaction.dataset.DatasetHelper.java
License:Apache License
public Optional<DateTime> getEarliestLateFileModificationTime() {
    DateTimeZone timeZone = DateTimeZone.forID(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE,
            MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
    try {
        // Track the most recent modification time among the late files
        long maxTimestamp = Long.MIN_VALUE;
        for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, this.dataset.outputLatePath())) {
            maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
        }
        return maxTimestamp == Long.MIN_VALUE ? Optional.<DateTime>absent()
                : Optional.of(new DateTime(maxTimestamp, timeZone));
    } catch (Exception e) {
        logger.error("Failed to get earliest late file modification time", e);
        return Optional.absent();
    }
}
From source file:gobblin.compaction.mapreduce.MRCompactorJobPropCreator.java
License:Apache License
/**
 * Check if inputFolder contains any files whose modification times are more
 * recent than the last compaction time stored within outputFolder; return any files
 * which do. An empty set is returned if all files are older than the last compaction time.
 */
private Set<Path> getNewDataInFolder(Path inputFolder, Path outputFolder) throws IOException {
    Set<Path> newFiles = Sets.newHashSet();
    if (!this.fs.exists(inputFolder) || !this.fs.exists(outputFolder)) {
        return newFiles;
    }
    DateTime lastCompactionTime = new DateTime(MRCompactor.readCompactionTimestamp(this.fs, outputFolder));
    for (FileStatus fstat : FileListUtils.listFilesRecursively(this.fs, inputFolder)) {
        DateTime fileModificationTime = new DateTime(fstat.getModificationTime());
        if (fileModificationTime.isAfter(lastCompactionTime)) {
            LOG.info("[" + fileModificationTime.getMillis() + "] " + fstat.getPath() + " is after "
                    + lastCompactionTime.getMillis());
            newFiles.add(fstat.getPath());
        }
    }
    if (!newFiles.isEmpty()) {
        LOG.info(String.format("Found %d new files within folder %s which are more recent than the previous "
                + "compaction start time of %s.", newFiles.size(), inputFolder, lastCompactionTime));
    }
    return newFiles;
}
From source file:gobblin.compaction.mapreduce.MRCompactorJobRunner.java
License:Apache License
/**
 * For regular compactions, the compaction timestamp is the time the compaction job starts.
 *
 * If this is a recompaction from output paths, the compaction timestamp remains the same as the previously
 * persisted compaction time. This is because such a recompaction doesn't consume input data, so next time,
 * whether a file in the input folder is considered a late file should still be based on the previous
 * compaction timestamp.
 */
private DateTime getCompactionTimestamp() throws IOException {
    DateTimeZone timeZone = DateTimeZone.forID(this.dataset.jobProps().getProp(MRCompactor.COMPACTION_TIMEZONE,
            MRCompactor.DEFAULT_COMPACTION_TIMEZONE));
    if (!this.recompactFromDestPaths) {
        return new DateTime(timeZone);
    }
    Set<Path> inputPaths = getInputPaths();
    // Recover the persisted compaction time as the most recent modification
    // time across the previously compacted output files
    long maxTimestamp = Long.MIN_VALUE;
    for (FileStatus status : FileListUtils.listFilesRecursively(this.fs, inputPaths)) {
        maxTimestamp = Math.max(maxTimestamp, status.getModificationTime());
    }
    return maxTimestamp == Long.MIN_VALUE ? new DateTime(timeZone) : new DateTime(maxTimestamp, timeZone);
}
From source file:gobblin.compaction.mapreduce.MRCompactorJobRunner.java
License:Apache License
private boolean findNewDataSinceCompactionStarted(Path inputPath, DateTime jobStartTime) throws IOException {
    for (FileStatus fstat : FileListUtils.listFilesRecursively(this.fs, inputPath)) {
        DateTime fileModificationTime = new DateTime(fstat.getModificationTime());
        if (fileModificationTime.isAfter(jobStartTime)) {
            LOG.info(String.format(
                    "Found new file %s in input folder %s after compaction started. Will abort compaction.",
                    fstat.getPath(), inputPath));
            return true;
        }
    }
    return false;
}
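The three Gobblin examples above share one pattern: walk a directory tree and compare each file's getModificationTime() against a cutoff. A self-contained sketch of that pattern using only stock Hadoop APIs; the class and method names are hypothetical:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public final class ModificationTimeScan {
    /** Returns all files under root modified strictly after cutoffMillis. */
    public static List<Path> filesModifiedAfter(FileSystem fs, Path root, long cutoffMillis)
            throws IOException {
        List<Path> newer = new ArrayList<>();
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(root, true); // true = recurse into subdirectories
        while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            if (status.getModificationTime() > cutoffMillis) {
                newer.add(status.getPath());
            }
        }
        return newer;
    }
}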