List of usage examples for org.apache.hadoop.fs.FileStatus.getModificationTime
public long getModificationTime()
Returns the modification time of the file, in milliseconds since January 1, 1970 UTC.
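A minimal sketch of the typical call pattern, assuming a default Configuration that resolves to your cluster; the path /tmp/example.txt is hypothetical and used only for illustration:

import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModificationTimeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        Path path = new Path("/tmp/example.txt"); // hypothetical path, for illustration only
        FileStatus status = fs.getFileStatus(path);
        long mtime = status.getModificationTime(); // milliseconds since the epoch
        System.out.println(path + " was last modified at " + new Date(mtime));
    }
}

The examples below use the returned value in two recurring ways: computing a file's age (now - getModificationTime()) before cleanup, or comparing it against a previously recorded timestamp to detect that a file has changed.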
From source file:org.apache.accumulo.server.master.CoordinateRecoveryTask.java
License:Apache License
private void removeOldRecoverFiles() throws IOException {
    long now = System.currentTimeMillis();
    long maxAgeInMillis = ServerConfiguration.getSystemConfiguration()
            .getTimeInMillis(Property.MASTER_RECOVERY_MAXAGE);
    FileStatus[] children = fs.listStatus(new Path(ServerConstants.getRecoveryDir()));
    if (children != null) {
        for (FileStatus child : children) {
            if (now - child.getModificationTime() > maxAgeInMillis && !fs.delete(child.getPath(), true)) {
                log.warn("Unable to delete old recovery directory: " + child.getPath());
            }
        }
    }
}
From source file:org.apache.ambari.servicemonitor.clients.LsDir.java
License:Apache License
@Override
protected Operation executeOneOperation() throws IOException {
    Operation operation = new Operation("ls " + dir);
    started(operation);
    try {
        FileSystem fs = cachedFS.getOrCreate();
        FileStatus dirStatus = fs.getFileStatus(dir);
        StringBuilder builder = new StringBuilder();
        builder.append("File ").append(dirStatus.getPath());
        builder.append(" type: ");
        builder.append(dirStatus.isDir() ? "directory" : "file");
        builder.append(" ");
        builder.append("Last Modified ").append(new Date(dirStatus.getModificationTime()));
        operation.success(builder.toString());
    } catch (ExitClientRunException e) {
        // propagate this up
        throw e;
    } catch (IOException e) {
        // all other outcomes are failures
        operation.failure(e);
    }
    return operation;
}
From source file:org.apache.ambari.view.filebrowser.HdfsApi.java
License:Apache License
/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * The <code>SCHEME://HOST:PORT</code> portion of the path is stripped.
 *
 * @param status Hadoop file status.
 * @return The JSON representation of the file status.
 */
public Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi));
    json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi));
    json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi));
    return json;
}
From source file:org.apache.ambari.view.hive.utils.HdfsApi.java
License:Apache License
/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 *
 * @param status Hadoop file status.
 * @return The JSON representation of the file status.
 */
public static Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", status.getPath().toString());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    return json;
}
From source file:org.apache.ambari.view.utils.hdfs.HdfsApi.java
License:Apache License
/**
 * Converts a Hadoop <code>FileStatus</code> object into a JSON object.
 * The <code>SCHEME://HOST:PORT</code> portion of the path is stripped.
 *
 * @param status Hadoop file status.
 * @return The JSON representation of the file status.
 */
public Map<String, Object> fileStatusToJSON(FileStatus status) {
    Map<String, Object> json = new LinkedHashMap<String, Object>();
    json.put("path", Path.getPathWithoutSchemeAndAuthority(status.getPath()).toString());
    json.put("isDirectory", status.isDirectory());
    json.put("len", status.getLen());
    json.put("owner", status.getOwner());
    json.put("group", status.getGroup());
    json.put("permission", permissionToString(status.getPermission()));
    json.put("accessTime", status.getAccessTime());
    json.put("modificationTime", status.getModificationTime());
    json.put("blockSize", status.getBlockSize());
    json.put("replication", status.getReplication());
    json.put("readAccess", checkAccessPermissions(status, FsAction.READ, ugi));
    json.put("writeAccess", checkAccessPermissions(status, FsAction.WRITE, ugi));
    json.put("executeAccess", checkAccessPermissions(status, FsAction.EXECUTE, ugi));
    return json;
}
From source file:org.apache.asterix.aoya.AsterixApplicationMaster.java
License:Apache License
/**
 * Here I am just pointing the Containers to the existing HDFS resources given by the Client
 * filesystem of the nodes.
 *
 * @throws IOException
 */
private void localizeDFSResources() throws IOException {
    // if performing an 'offline' task, skip a lot of resource distribution
    if (obliterate || backup || restore) {
        if (appMasterJar == null || ("").equals(appMasterJar)) {
            // this can happen in a jUnit testing environment. we don't need to set it there.
            if (!conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
                throw new IllegalStateException("AM jar not provided in environment.");
            } else {
                return;
            }
        }
        FileSystem fs = FileSystem.get(conf);
        FileStatus appMasterJarStatus = fs.getFileStatus(appMasterJar);
        LocalResource obliteratorJar = Records.newRecord(LocalResource.class);
        obliteratorJar.setType(LocalResourceType.FILE);
        obliteratorJar.setVisibility(LocalResourceVisibility.PRIVATE);
        obliteratorJar.setResource(ConverterUtils.getYarnUrlFromPath(appMasterJar));
        obliteratorJar.setTimestamp(appMasterJarStatus.getModificationTime());
        obliteratorJar.setSize(appMasterJarStatus.getLen());
        localResources.put("asterix-yarn.jar", obliteratorJar);
        LOG.info(localResources.values());
        return;
    }
    // otherwise, distribute everything to start up asterix
    LocalResource asterixZip = Records.newRecord(LocalResource.class);
    // this un-tar's the asterix distribution
    asterixZip.setType(LocalResourceType.ARCHIVE);
    asterixZip.setVisibility(LocalResourceVisibility.PRIVATE);
    try {
        asterixZip.setResource(ConverterUtils.getYarnUrlFromURI(new URI(asterixZipPath)));
    } catch (URISyntaxException e) {
        LOG.error("Error locating Asterix zip in env, path=" + asterixZipPath);
        throw new IOException(e);
    }
    asterixZip.setTimestamp(asterixZipTimestamp);
    asterixZip.setSize(asterixZipLen);
    localResources.put(ASTERIX_ZIP_NAME, asterixZip);
    // now let's do the same for the cluster description XML
    LocalResource asterixConf = Records.newRecord(LocalResource.class);
    asterixConf.setType(LocalResourceType.FILE);
    asterixConf.setVisibility(LocalResourceVisibility.PRIVATE);
    try {
        asterixConf.setResource(ConverterUtils.getYarnUrlFromURI(new URI(asterixConfPath)));
    } catch (URISyntaxException e) {
        LOG.error("Error locating Asterix config in env, path=" + asterixConfPath);
        throw new IOException(e);
    }
    // TODO: I could avoid localizing this everywhere by only calling this block on the metadata node.
    asterixConf.setTimestamp(asterixConfTimestamp);
    asterixConf.setSize(asterixConfLen);
    localResources.put("cluster-config.xml", asterixConf);
    // now add the libraries if there are any
    try {
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path(dfsBasePath, instanceConfPath + File.separator + "library" + Path.SEPARATOR);
        if (fs.exists(p)) {
            FileStatus[] dataverses = fs.listStatus(p);
            for (FileStatus d : dataverses) {
                if (!d.isDirectory()) {
                    throw new IOException("Library configuration directory structure is incorrect");
                }
                FileStatus[] libraries = fs.listStatus(d.getPath());
                for (FileStatus l : libraries) {
                    if (l.isDirectory()) {
                        throw new IOException("Library configuration directory structure is incorrect");
                    }
                    LocalResource lr = Records.newRecord(LocalResource.class);
                    lr.setResource(ConverterUtils.getYarnUrlFromURI(l.getPath().toUri()));
                    lr.setSize(l.getLen());
                    lr.setTimestamp(l.getModificationTime());
                    lr.setType(LocalResourceType.ARCHIVE);
                    lr.setVisibility(LocalResourceVisibility.PRIVATE);
                    localResources.put("library" + Path.SEPARATOR + d.getPath().getName() + Path.SEPARATOR
                            + l.getPath().getName().split("\\.")[0], lr);
                    LOG.info("Found library: " + l.getPath().toString());
                    LOG.info(l.getPath().getName());
                }
            }
        }
    } catch (FileNotFoundException e) {
        LOG.info("No external libraries present");
        // do nothing, it just means there aren't libraries. that is possible and ok
        // it should be handled by the fs.exists(p) check though.
    }
    LOG.info(localResources.values());
}
From source file:org.apache.asterix.external.indexing.input.TextFileLookupReader.java
License:Apache License
@Override
public String read(int fileNumber, long recordOffset) throws Exception {
    if (fileNumber != this.fileNumber) {
        this.fileNumber = fileNumber;
        filesIndexAccessor.searchForFile(fileNumber, file);
        try {
            FileStatus fileStatus = fs.getFileStatus(new Path(file.getFileName()));
            if (fileStatus.getModificationTime() != file.getLastModefiedTime().getTime()) {
                this.fileNumber = fileNumber;
                skipFile = true;
                return null;
            } else {
                this.fileNumber = fileNumber;
                skipFile = false;
                openFile(file.getFileName());
            }
        } catch (FileNotFoundException e) {
            // File is not there, skip it and do nothing
            this.fileNumber = fileNumber;
            skipFile = true;
            return null;
        }
    } else if (skipFile) {
        return null;
    }
    lineReader.seek(recordOffset);
    lineReader.readLine(value);
    return value.toString();
}
From source file:org.apache.asterix.external.input.record.reader.hdfs.AbstractHDFSLookupRecordReader.java
License:Apache License
private void validate() throws IllegalArgumentException, IOException {
    FileStatus fileStatus = fs.getFileStatus(new Path(file.getFileName()));
    replaced = fileStatus.getModificationTime() != file.getLastModefiedTime().getTime();
}
From source file:org.apache.asterix.external.input.record.reader.hdfs.HDFSRecordReader.java
License:Apache License
private boolean nextInputSplit() throws IOException {
    for (; currentSplitIndex < inputSplits.length; currentSplitIndex++) {
        // read all the partitions scheduled to the current node
        if (readSchedule[currentSplitIndex].equals(nodeName)) {
            // pick an unread split to read; synchronize among
            // simultaneous partitions in the same machine
            synchronized (read) {
                if (!read[currentSplitIndex]) {
                    read[currentSplitIndex] = true;
                } else {
                    continue;
                }
            }
            if (snapshot != null) {
                String fileName = ((FileSplit) (inputSplits[currentSplitIndex])).getPath().toUri().getPath();
                FileStatus fileStatus = hdfs.getFileStatus(new Path(fileName));
                // Skip if not the same file stored in the files snapshot
                if (fileStatus.getModificationTime() != snapshot.get(currentSplitIndex).getLastModefiedTime()
                        .getTime()) {
                    continue;
                }
            }
            reader.close();
            reader = getRecordReader(currentSplitIndex);
            return true;
        }
    }
    return false;
}
From source file:org.apache.asterix.external.util.HDFSUtils.java
License:Apache License
/**
 * Instead of creating the splits using the input format, we do it manually.
 * This function returns file splits (one per HDFS file block) irrespective of the number of
 * partitions, and the produced splits only cover the intersection between the current files in
 * HDFS and the files stored internally in AsterixDB.
 * 1. NoOp means appended file
 * 2. AddOp means new file
 * 3. UpdateOp means the delta of a file
 *
 * @return
 * @throws IOException
 */
public static InputSplit[] getSplits(JobConf conf, List<ExternalFile> files) throws IOException {
    // Create file system object
    FileSystem fs = FileSystem.get(conf);
    ArrayList<FileSplit> fileSplits = new ArrayList<FileSplit>();
    ArrayList<ExternalFile> orderedExternalFiles = new ArrayList<ExternalFile>();
    // Create file splits
    for (ExternalFile file : files) {
        Path filePath = new Path(file.getFileName());
        FileStatus fileStatus;
        try {
            fileStatus = fs.getFileStatus(filePath);
        } catch (FileNotFoundException e) {
            // file was deleted at some point, skip to next file
            continue;
        }
        if (file.getPendingOp() == ExternalFilePendingOp.PENDING_ADD_OP
                && fileStatus.getModificationTime() == file.getLastModefiedTime().getTime()) {
            // Get its information from the HDFS name node
            BlockLocation[] fileBlocks = fs.getFileBlockLocations(fileStatus, 0, file.getSize());
            // Create a split per block
            for (BlockLocation block : fileBlocks) {
                if (block.getOffset() < file.getSize()) {
                    fileSplits.add(new FileSplit(filePath, block.getOffset(),
                            (block.getLength() + block.getOffset()) < file.getSize() ? block.getLength()
                                    : (file.getSize() - block.getOffset()),
                            block.getHosts()));
                    orderedExternalFiles.add(file);
                }
            }
        } else if (file.getPendingOp() == ExternalFilePendingOp.PENDING_NO_OP
                && fileStatus.getModificationTime() == file.getLastModefiedTime().getTime()) {
            long oldSize = 0L;
            long newSize = file.getSize();
            for (int i = 0; i < files.size(); i++) {
                // compare file names by value, not by reference
                if (files.get(i).getFileName().equals(file.getFileName())
                        && files.get(i).getSize() != file.getSize()) {
                    newSize = files.get(i).getSize();
                    oldSize = file.getSize();
                    break;
                }
            }
            // Get its information from the HDFS name node
            BlockLocation[] fileBlocks = fs.getFileBlockLocations(fileStatus, 0, newSize);
            // Create a split per block
            for (BlockLocation block : fileBlocks) {
                if (block.getOffset() + block.getLength() > oldSize) {
                    if (block.getOffset() < newSize) {
                        // Block intersects the delta -> create a split
                        long startCut = (block.getOffset() > oldSize) ? 0L : oldSize - block.getOffset();
                        long endCut = (block.getOffset() + block.getLength() < newSize) ? 0L
                                : block.getOffset() + block.getLength() - newSize;
                        long splitLength = block.getLength() - startCut - endCut;
                        fileSplits.add(new FileSplit(filePath, block.getOffset() + startCut, splitLength,
                                block.getHosts()));
                        orderedExternalFiles.add(file);
                    }
                }
            }
        }
    }
    fs.close();
    files.clear();
    files.addAll(orderedExternalFiles);
    return fileSplits.toArray(new FileSplit[fileSplits.size()]);
}