List of usage examples for org.apache.hadoop.fs FileStatus getModificationTime
public long getModificationTime()
Returns the modification time of the file, in milliseconds since the epoch.
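Before the source-file examples, a minimal self-contained sketch of the call (assuming a reachable HDFS configured via the default Configuration; the path below is illustrative):

import java.io.IOException;
import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModTimeExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // getModificationTime() returns milliseconds since the epoch.
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt")); // illustrative path
        System.out.println("modified: " + new Date(status.getModificationTime()));
    }
}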
From source file:cc.solr.lucene.store.hdfs.HdfsDirectory.java
License:Apache License
public long fileModified(String name) throws IOException {
    name = getRealName(name);
    if (!fileExists(name)) {
        throw new FileNotFoundException(name);
    }
    FileStatus fileStatus = getFileSystem().getFileStatus(new Path(_hdfsDirPath, name));
    return fileStatus.getModificationTime();
}
From source file:ch.cern.db.hdfs.Main.java
License:GNU General Public License
private void printFileStatus(FileStatus status) {
    System.out.println();
    System.out.println("Showing metadata for: " + status.getPath());
    System.out.println("  isDirectory: " + status.isDirectory());
    System.out.println("  isFile: " + status.isFile());
    System.out.println("  isSymlink: " + status.isSymlink());
    System.out.println("  encrypted: " + status.isEncrypted());
    System.out.println("  length: " + status.getLen());
    System.out.println("  replication: " + status.getReplication());
    System.out.println("  blocksize: " + status.getBlockSize());
    System.out.println("  modification_time: " + new Date(status.getModificationTime()));
    System.out.println("  access_time: " + new Date(status.getAccessTime()));
    System.out.println("  owner: " + status.getOwner());
    System.out.println("  group: " + status.getGroup());
    System.out.println("  permission: " + status.getPermission());
    System.out.println();
}
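A typical call site, sketched (assuming an initialized FileSystem fs; the directory path is illustrative):

// Print metadata, including modification time, for every entry in a directory.
for (FileStatus status : fs.listStatus(new Path("/user"))) { // illustrative path
    printFileStatus(status);
}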
From source file:cn.ict.magicube.fs.shell.FixedLs.java
License:Apache License
@Override
protected void processPath(PathData item) throws IOException {
    FileStatus stat = item.stat;
    String line = String.format(lineFormat,
            (stat.isDirectory() ? "d" : "-"),
            stat.getPermission(),
            (stat.isFile() ? stat.getReplication() : "-"),
            formatSize(stat.getLen()),
            dateFormat.format(new Date(stat.getModificationTime())),
            item);
    out.println(line);
}
From source file:com.alibaba.jstorm.hdfs.common.ModifTimeComparator.java
License:Apache License
@Override
public int compare(FileStatus o1, FileStatus o2) {
    // Order FileStatus entries by ascending modification time.
    return Long.compare(o1.getModificationTime(), o2.getModificationTime());
}
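For sorting, the same ordering can be written more compactly with Comparator.comparingLong (Java 8+); a minimal sketch, with the class and helper names chosen for illustration:

import java.util.Arrays;
import java.util.Comparator;

import org.apache.hadoop.fs.FileStatus;

public class ModTimeSort {
    // Sorts entries oldest-first by modification time, without boxing each long.
    static void sortByModTime(FileStatus[] statuses) {
        Arrays.sort(statuses, Comparator.comparingLong(FileStatus::getModificationTime));
    }
}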
From source file:com.alibaba.jstorm.hdfs.transaction.RocksDbHdfsState.java
License:Apache License
private void removeObsoleteRemoteCheckpoints(long successBatchId) {
    long startTime = System.currentTimeMillis();
    if (lastSuccessBatchId != -1 && lastSuccessBatchId != (successBatchId - 1)) {
        LOG.warn("Some ack msgs from TM were lost! Last success batch Id: {}, Current success batch Id: {}",
                lastSuccessBatchId, successBatchId);
    }
    lastSuccessBatchId = successBatchId;
    long obsoleteBatchId = successBatchId - 1;
    try {
        Collection<String> lastSuccessSStFiles = hdfsCache
                .readLines(getRemoteCheckpointSstListFile(successBatchId));
        if (hdfsCache.exist(getRemoteCheckpointPath(obsoleteBatchId))) {
            // remove obsolete sst files
            Collection<String> obsoleteSstFiles = hdfsCache
                    .readLines(getRemoteCheckpointSstListFile(obsoleteBatchId));
            obsoleteSstFiles.removeAll(lastSuccessSStFiles);
            for (String sstFile : obsoleteSstFiles) {
                hdfsCache.remove(hdfsDbDir + "/" + sstFile, false);
            }
            // remove checkpoint dir
            hdfsCache.remove(getRemoteCheckpointPath(obsoleteBatchId), true);
        }

        // Sometimes, if a remove failed, some checkpoint files would be left in the remote FS.
        // So check here whether a full clean is required for a specified period. If so, clean
        // all checkpoint files which have expired.
        long currentTime = System.currentTimeMillis();
        if (currentTime - lastCleanTime > cleanPeriod) {
            FileStatus successCpFileStatus = hdfsCache
                    .getFileStatus(getRemoteCheckpointSstListFile(successBatchId));
            // remove obsolete sst files
            FileStatus[] fileStatuses = hdfsCache.listFileStatus(hdfsDbDir);
            for (FileStatus fileStatus : fileStatuses) {
                String fileName = fileStatus.getPath().getName();
                if (fileStatus.getModificationTime() < successCpFileStatus.getModificationTime()
                        && !lastSuccessSStFiles.contains(fileName)) {
                    hdfsCache.remove(hdfsDbDir + "/" + fileName, true);
                }
            }
            // remove obsolete checkpoint dirs
            fileStatuses = hdfsCache.listFileStatus(hdfsCheckpointDir);
            for (FileStatus fileStatus : fileStatuses) {
                String checkpointId = fileStatus.getPath().getName();
                if (fileStatus.getModificationTime() < successCpFileStatus.getModificationTime()
                        && Integer.valueOf(checkpointId) != successBatchId) {
                    hdfsCache.remove(hdfsCheckpointDir + "/" + checkpointId, true);
                }
            }
            lastCleanTime = currentTime;
        }
    } catch (IOException e) {
        LOG.error("Failed to remove obsolete checkpoint data for batch-" + obsoleteBatchId, e);
    }
    if (JStormMetrics.enabled)
        this.hdfsDeleteLatency.update(System.currentTimeMillis() - startTime);
}
From source file:com.asakusafw.cleaner.main.HDFSCleaner.java
License:Apache License
/**
 * Cleans expired files and directories under the given path.
 * @param fs HDFS file system
 * @param cleanPath HDFS path to clean
 * @param isSetExecutionId whether first-level directory names are execution IDs
 * @param pattern file name pattern of deletion targets
 * @param keepDate number of days to keep files
 * @param now current time
 * @param recursive whether to clean subdirectories recursively
 * @return true if cleaning succeeded
 * @throws CleanerSystemException if a system error occurs
 */
private boolean cleanDir(FileSystem fs, Path cleanPath, boolean isSetExecutionId, String pattern,
        int keepDate, Date now, boolean recursive) throws CleanerSystemException {
    try {
        if (!fs.exists(cleanPath)) {
            // the path to clean does not exist
            Log.log(CLASS, MessageIdConst.HCLN_CLEN_DIR_ERROR, "target directory does not exist",
                    cleanPath.toString());
            return false;
        }
        if (!fs.getFileStatus(cleanPath).isDir()) {
            // the path to clean is not a directory
            Log.log(CLASS, MessageIdConst.HCLN_CLEN_DIR_ERROR, "target path is not a directory",
                    cleanPath.toString());
            return false;
        }
        // start cleaning
        Log.log(CLASS, MessageIdConst.HCLN_FILE_DELETE, cleanPath.toString());
        int cleanFileCount = 0;
        int cleanDirCount = 0;
        boolean result = true;
        FileStatus[] dirStatus = getListStatus(fs, cleanPath);
        Path[] listedPaths = FileUtil.stat2Paths(dirStatus);
        for (Path path : listedPaths) {
            FileStatus status = fs.getFileStatus(path);
            long lastModifiedTime = status.getModificationTime();
            if (status.isDir() && recursive) {
                // clean subdirectories recursively
                if (isSetExecutionId) {
                    // skip directories whose execution ID belongs to a running jobflow
                    String executionId = path.getName();
                    if (isRunningJobFlow(executionId)) {
                        Log.log(CLASS, MessageIdConst.HCLN_CLEN_DIR_EXEC, path.toString());
                        continue;
                    }
                }
                FileStatus[] childdirStatus = getListStatus(fs, path);
                if (childdirStatus.length == 0) {
                    // delete the empty directory if it has expired
                    if (isExpired(lastModifiedTime, keepDate, now)) {
                        if (!fs.delete(path, false)) {
                            Log.log(CLASS, MessageIdConst.HCLN_CLEN_FAIL, "", path.toString());
                            result = false;
                        } else {
                            cleanDirCount++;
                            Log.log(CLASS, MessageIdConst.HCLN_DIR_DELETE, path.toString());
                        }
                    }
                } else {
                    // clean the subdirectory first, then delete it if it became empty and has expired
                    if (cleanDir(fs, path, false, pattern, keepDate, now, recursive)) {
                        childdirStatus = getListStatus(fs, path);
                        if (childdirStatus.length == 0) {
                            if (isExpired(lastModifiedTime, keepDate, now)) {
                                if (!fs.delete(path, false)) {
                                    Log.log(CLASS, MessageIdConst.HCLN_CLEN_FAIL, "", path.toString());
                                    result = false;
                                } else {
                                    cleanDirCount++;
                                    Log.log(CLASS, MessageIdConst.HCLN_DIR_DELETE, path.toString());
                                }
                            }
                        }
                    } else {
                        Log.log(CLASS, MessageIdConst.HCLN_CLEN_FAIL, "", path.toString());
                        result = false;
                    }
                }
            } else if (!status.isDir()) {
                // delete the file if it has expired and matches the pattern
                if (isExpired(lastModifiedTime, keepDate, now) && isMatchPattern(path, pattern)) {
                    if (!fs.delete(path, false)) {
                        Log.log(CLASS, MessageIdConst.HCLN_CLEN_FAIL, "", path.toString());
                        result = false;
                    } else {
                        Log.log(CLASS, MessageIdConst.HCLN_DELETE_FILE, path.toString());
                        cleanFileCount++;
                    }
                }
            }
        }
        Log.log(CLASS, MessageIdConst.HCLN_FILE_DELETE_SUCCESS, cleanPath.toString(), cleanDirCount,
                cleanFileCount);
        return result;
    } catch (IOException e) {
        Log.log(e, CLASS, MessageIdConst.HCLN_CLEN_DIR_EXCEPTION, cleanPath.getName());
        return false;
    }
}
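The isExpired helper is not shown in this excerpt; a plausible sketch of such a retention check, assuming keepDate is a retention period in days (a guess from the call sites, not the actual Asakusa code):

import java.util.Date;
import java.util.concurrent.TimeUnit;

// Hypothetical reconstruction: true when the modification time is more than
// keepDate days before `now`. The real helper may differ.
private static boolean isExpired(long lastModifiedTime, int keepDate, Date now) {
    return lastModifiedTime + TimeUnit.DAYS.toMillis(keepDate) < now.getTime();
}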
From source file:com.asakusafw.operation.tools.hadoop.fs.Clean.java
License:Apache License
private boolean remove(FileSystem fs, FileStatus file, Context context) {
    LOG.debug("Attempt to remove {}", file.getPath()); //$NON-NLS-1$
    boolean isSymlink = context.isSymlink(fs, file);
    if (isSymlink) {
        LOG.error(MessageFormat.format("[OT-CLEAN-W01001] Symlink is currently not supported: {0}",
                file.getPath()));
        context.setError();
        return false;
    }
    if (file.isDir()) {
        if (context.isRecursive()) {
            List<FileStatus> children;
            try {
                children = asList(fs.listStatus(file.getPath()));
            } catch (IOException e) {
                LOG.error(MessageFormat.format("[OT-CLEAN-E01003] Failed to list directory: {0}",
                        file.getPath()), e);
                context.setError();
                return false;
            }
            boolean deleteChildren = true;
            for (FileStatus child : children) {
                deleteChildren &= remove(fs, child, context);
            }
            if (deleteChildren == false) {
                LOG.info(MessageFormat.format("[OT-CLEAN-I01004] Skipped: {0} (is non-empty directory)",
                        file.getPath(), new Date(file.getModificationTime())));
                return false;
            }
        } else {
            LOG.info(MessageFormat.format("[OT-CLEAN-I01003] Skipped: {0} (is directory)", file.getPath(),
                    new Date(file.getModificationTime())));
            return false;
        }
    }
    if (context.canDelete(file)) {
        LOG.debug("Removing {}", file.getPath()); //$NON-NLS-1$
        if (context.isDryRun() == false) {
            try {
                boolean removed = fs.delete(file.getPath(), false);
                if (removed == false) {
                    LOG.error(MessageFormat.format("[OT-CLEAN-E01004] Failed to remove: {0}",
                            file.getPath()));
                    context.setError();
                    return false;
                }
            } catch (IOException e) {
                LOG.warn(MessageFormat.format("[OT-CLEAN-E01004] Failed to remove: {0}", file.getPath()), e);
                context.setError();
                return false;
            }
        }
        LOG.info(MessageFormat.format("[OT-CLEAN-I01001] Removed: {0} (timestamp={1})", file.getPath(),
                new Date(file.getModificationTime())));
    } else {
        LOG.info(MessageFormat.format("[OT-CLEAN-I01002] Kept: {0} (timestamp={1})", file.getPath(),
                new Date(file.getModificationTime())));
        return false;
    }
    return true;
}
From source file:com.asakusafw.runtime.directio.hadoop.DirectIoTransactionEditor.java
License:Apache License
/**
 * Lists Direct I/O transaction information.
 * @return transactions, or an empty list if none exist
 * @throws IOException if failed to obtain transaction information
 * @throws IllegalArgumentException if some parameters were {@code null}
 */
public List<TransactionInfo> list() throws IOException {
    LOG.info("Start listing Direct I/O transactions");
    List<FileStatus> list = new ArrayList<>(HadoopDataSourceUtil.findAllTransactionInfoFiles(getConf()));
    if (list.isEmpty()) {
        LOG.info("There are no Direct I/O transactions");
        return Collections.emptyList();
    }
    Collections.sort(list, new Comparator<FileStatus>() {
        @Override
        public int compare(FileStatus o1, FileStatus o2) {
            return Long.compare(o1.getModificationTime(), o2.getModificationTime());
        }
    });
    LOG.info(MessageFormat.format("Start extracting {0} Direct I/O commit information", list.size()));
    List<TransactionInfo> results = new ArrayList<>();
    for (FileStatus stat : list) {
        TransactionInfo commitObject = toInfoObject(stat);
        if (commitObject != null) {
            results.add(commitObject);
        }
    }
    LOG.info("Finish listing Direct I/O transactions");
    return results;
}
From source file:com.asakusafw.runtime.directio.hadoop.DirectIoTransactionEditor.java
License:Apache License
private TransactionInfo toInfoObject(FileStatus stat) throws IOException {
    assert stat != null;
    Path path = stat.getPath();
    String executionId = HadoopDataSourceUtil.getTransactionInfoExecutionId(path);
    long timestamp = stat.getModificationTime();
    List<String> comment = new ArrayList<>();
    Path commitMarkPath = HadoopDataSourceUtil.getCommitMarkPath(getConf(), executionId);
    FileSystem fs = path.getFileSystem(getConf());
    boolean committed = fs.exists(commitMarkPath);
    try (FSDataInputStream input = fs.open(path);
            Scanner scanner = new Scanner(
                    new InputStreamReader(input, HadoopDataSourceUtil.COMMENT_CHARSET))) {
        while (scanner.hasNextLine()) {
            comment.add(scanner.nextLine());
        }
    } catch (IOException e) {
        comment.add(e.toString());
    }
    return new TransactionInfo(executionId, timestamp, committed, comment);
}
From source file:com.bigjob.Client.java
License:Apache License
/**
 * Main run function for the client.
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
public boolean run() throws IOException, YarnException {
    LOG.info("Running Client");
    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM (RM)" + ", numNodeManagers="
            + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // TODO get min/max resource capabilities from RM and change memory ask if needed
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capability of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    int maxVCores = appResponse.getMaximumResourceCapability().getVirtualCores();
    LOG.info("Max virtual cores capability of resources in this cluster " + maxVCores);

    if (amVCores > maxVCores) {
        LOG.info("AM virtual cores specified above max threshold of cluster. " + "Using max value."
                + ", specified=" + amVCores + ", max=" + maxVCores);
        amVCores = maxVCores;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    LOG.info("Copy App Master jar from local filesystem and add to local environment");
    // Copy the application master jar to the filesystem
    // Create a local resource to point to the destination jar path
    // if (dfsUrl != null && dfsUrl.equals("") == false) {
    //     conf.set("fs.defaultFS", dfsUrl);
    // }
    FileSystem fs = FileSystem.get(conf);
    addToLocalResources(fs, appMasterJar, appMasterJarPath, appId.getId(), localResources, null);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        addToLocalResources(fs, log4jPropFile, log4jPath, appId.getId(), localResources, null);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy it into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.
    String hdfsShellScriptLocation = "";
    long hdfsShellScriptLen = 0;
    long hdfsShellScriptTimestamp = 0;
    if (!shellScriptPath.isEmpty()) {
        Path shellSrc = new Path(shellScriptPath);
        String shellPathSuffix = appName + "/" + appId.getId() + "/"
                + (Shell.WINDOWS ? windowBatPath : linuxShellPath);
        Path shellDst = new Path(fs.getHomeDirectory(), shellPathSuffix);
        fs.copyFromLocalFile(false, true, shellSrc, shellDst);
        hdfsShellScriptLocation = shellDst.toUri().toString();
        FileStatus shellFileStatus = fs.getFileStatus(shellDst);
        hdfsShellScriptLen = shellFileStatus.getLen();
        hdfsShellScriptTimestamp = shellFileStatus.getModificationTime();
    }

    if (!shellCommand.isEmpty()) {
        addToLocalResources(fs, null, shellCommandPath, appId.getId(), localResources, shellCommand);
    }

    if (shellArgs.length > 0) {
        addToLocalResources(fs, null, shellArgsPath, appId.getId(), localResources,
                StringUtils.join(shellArgs, " "));
    }

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();

    // put location of shell script into env
    // using the env info, the application master will create the correct local resource for the
    // eventual containers that will be launched to execute the shell scripts
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION, hdfsShellScriptLocation);
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP, Long.toString(hdfsShellScriptTimestamp));
    env.put(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN, Long.toString(hdfsShellScriptLen));

    // Add AppMaster.jar location to classpath
    // At some point we should not be required to add
    // the hadoop specific classpaths to the env.
    // It should be provided out of the box.
    // For now setting all required classpaths including
    // the classpath to "." for the application jar
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }

    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);

    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + amMemory + "m");
    // Set class name
    vargs.add(appMasterMainClass);
    // Set params for Application Master
    vargs.add("--container_memory " + String.valueOf(containerMemory));
    vargs.add("--container_vcores " + String.valueOf(containerVirtualCores));
    vargs.add("--num_containers " + String.valueOf(numContainers));
    vargs.add("--priority " + String.valueOf(shellCmdPriority));

    for (Map.Entry<String, String> entry : shellEnv.entrySet()) {
        vargs.add("--shell_env " + entry.getKey() + "=" + entry.getValue());
    }
    if (debugFlag) {
        vargs.add("--debug");
    }

    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/AppMaster.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, both memory and vcores are supported, so we set memory and
    // vcores requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    capability.setVirtualCores(amVCores);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?>[] tokens = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    // TODO - what is the range for priority? how to decide?
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp = applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // TODO
    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    // return monitorApplication(appId);
    System.out.println("ApplicationId:" + appId);
    return true;
}
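The script's modification time and length are exported through the DSConstants environment variables so the application master can rebuild a matching LocalResource for its containers. A minimal sketch of that consuming side (the method name toShellScriptResource is illustrative, not from the source):

import java.net.URI;

import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;

// Illustrative AM-side sketch: YARN refuses to localize the file if the
// timestamp or size recorded here no longer matches the copy in HDFS, which
// is why run() captures FileStatus.getModificationTime() and getLen() up front.
static LocalResource toShellScriptResource(String location, long timestamp, long length) {
    LocalResource shellRsrc = Records.newRecord(LocalResource.class);
    shellRsrc.setResource(ConverterUtils.getYarnUrlFromURI(URI.create(location)));
    shellRsrc.setType(LocalResourceType.FILE);
    shellRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    shellRsrc.setTimestamp(timestamp);
    shellRsrc.setSize(length);
    return shellRsrc;
}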