List of usage examples for org.apache.hadoop.fs.FileStatus.getModificationTime()
public long getModificationTime()
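getModificationTime() returns the file's modification time in milliseconds since the epoch, analogous to java.io.File#lastModified(). Before the examples from real projects below, here is a minimal self-contained sketch of the basic call pattern (the path is a placeholder):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.util.Date;

public class ModificationTimeExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/example.txt"); // placeholder path
        FileSystem fs = FileSystem.get(conf);

        // FileStatus carries the file's metadata; the modification time
        // is expressed in milliseconds since the epoch.
        FileStatus status = fs.getFileStatus(path);
        long mtime = status.getModificationTime();
        System.out.println(path + " last modified at " + new Date(mtime));
    }
}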
From source file:org.apache.ignite.yarn.utils.IgniteYarnUtils.java
License:Apache License
/**
 * @param file Path.
 * @param fs File system.
 * @param type Local resource type.
 * @throws Exception If failed.
 */
public static LocalResource setupFile(Path file, FileSystem fs, LocalResourceType type) throws Exception {
    LocalResource resource = Records.newRecord(LocalResource.class);

    file = fs.makeQualified(file);

    FileStatus stat = fs.getFileStatus(file);

    resource.setResource(ConverterUtils.getYarnUrlFromPath(file));
    resource.setSize(stat.getLen());
    resource.setTimestamp(stat.getModificationTime());
    resource.setType(type);
    resource.setVisibility(LocalResourceVisibility.APPLICATION);

    return resource;
}
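The modification time recorded here is not decorative: YARN's localizer compares the resource's timestamp against the file in HDFS at localization time and fails the container if they differ. As a hedged sketch of how such a LocalResource might be consumed (jarPath and fs are assumed to be a Path and FileSystem already in scope):

// Usage sketch, assuming `jarPath` and `fs` exist in the caller's scope.
// Register the localized jar under the name the container will see.
Map<String, LocalResource> localResources = new HashMap<>();
localResources.put("ignite.jar", IgniteYarnUtils.setupFile(jarPath, fs, LocalResourceType.FILE));

ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
ctx.setLocalResources(localResources);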
From source file:org.apache.ivory.cleanup.AbstractCleanupHandler.java
License:Apache License
protected void delete(Cluster cluster, Entity entity, long retention) throws IvoryException {
    FileStatus[] logs = getAllLogs(cluster, entity);
    long now = System.currentTimeMillis();

    for (FileStatus log : logs) {
        // Delete logs whose age (now - modification time) exceeds the retention period.
        if (now - log.getModificationTime() > retention) {
            try {
                boolean isDeleted = getFileSystem(cluster).delete(log.getPath(), true);
                if (!isDeleted) {
                    LOG.error("Unable to delete path: " + log.getPath());
                } else {
                    LOG.info("Deleted path: " + log.getPath());
                }
                deleteParentIfEmpty(getFileSystem(cluster), log.getPath().getParent());
            } catch (IOException e) {
                throw new IvoryException("Unable to delete log file: " + log.getPath() + " for entity "
                        + entity.getName() + " for cluster: " + cluster.getName(), e);
            }
        } else {
            LOG.info("Retention limit: " + retention + " not yet exceeded by age: "
                    + (now - log.getModificationTime()) + " for path: " + log.getPath());
        }
    }
}
From source file:org.apache.ivory.service.SharedLibraryHostingService.java
License:Apache License
public static void pushLibsToHDFS(String path, Cluster cluster, PathFilter pathFilter) throws IOException {
    Configuration conf = ClusterHelper.getConfiguration(cluster);
    FileSystem fs = FileSystem.get(conf);

    String localPaths = StartupProperties.get().getProperty("system.lib.location");
    assert localPaths != null && !localPaths.isEmpty() : "Invalid value for system.lib.location";
    if (!new File(localPaths).isDirectory()) {
        LOG.warn(localPaths + " configured for system.lib.location doesn't contain any valid libs");
        return;
    }

    for (File localFile : new File(localPaths).listFiles()) {
        Path clusterFile = new Path(path, localFile.getName());
        if (!pathFilter.accept(clusterFile))
            continue;

        if (fs.exists(clusterFile)) {
            FileStatus fstat = fs.getFileStatus(clusterFile);
            // Skip the copy when the remote file already matches the local
            // file's size and modification time.
            if (fstat.getLen() == localFile.length() && fstat.getModificationTime() == localFile.lastModified())
                continue;
        }
        fs.copyFromLocalFile(false, true, new Path(localFile.getAbsolutePath()), clusterFile);
        fs.setTimes(clusterFile, localFile.lastModified(), System.currentTimeMillis());
        LOG.info("Copied " + localFile.getAbsolutePath() + " to " + path + " in " + fs.getUri());
    }
}
From source file:org.apache.kylin.engine.mr.DFSFileTable.java
License:Apache License
public static Pair<Long, Long> getSizeAndLastModified(String path) throws IOException {
    FileSystem fs = HadoopUtil.getFileSystem(path);

    // get all contained files if path is directory
    ArrayList<FileStatus> allFiles = new ArrayList<>();
    FileStatus status = fs.getFileStatus(new Path(path));
    if (status.isFile()) {
        allFiles.add(status);
    } else {
        FileStatus[] listStatus = fs.listStatus(new Path(path));
        allFiles.addAll(Arrays.asList(listStatus));
    }

    long size = 0;
    long lastModified = 0;
    for (FileStatus file : allFiles) {
        size += file.getLen();
        lastModified = Math.max(lastModified, file.getModificationTime());
    }

    return Pair.newPair(size, lastModified);
}
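A hedged call-site sketch (the HDFS path is a placeholder, and getFirst/getSecond are assumed accessors on Kylin's Pair): the first element is the total byte size, the second the newest modification time. Note that for directories this aggregates direct children only, since it uses listStatus rather than a recursive walk.

// Sketch under the assumptions above; the path is hypothetical.
Pair<Long, Long> info = DFSFileTable.getSizeAndLastModified("hdfs://namenode/kylin/snapshot");
long totalBytes = info.getFirst();
long newestMtime = info.getSecond();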
From source file:org.apache.kylin.job.tools.DeployCoprocessorCLI.java
License:Apache License
public static Path uploadCoprocessorJar(String localCoprocessorJar, FileSystem fileSystem, Set<String> oldJarPaths)
        throws IOException {
    Path uploadPath = null;
    File localCoprocessorFile = new File(localCoprocessorJar);

    // check existing jars
    if (oldJarPaths == null) {
        oldJarPaths = new HashSet<String>();
    }
    Path coprocessorDir = getCoprocessorHDFSDir(fileSystem, KylinConfig.getInstanceFromEnv());
    for (FileStatus fileStatus : fileSystem.listStatus(coprocessorDir)) {
        // Reuse an already-uploaded jar when its size and modification time
        // match the local file. (Compare against the local File, not the
        // length of the path String.)
        if (fileStatus.getLen() == localCoprocessorFile.length()
                && fileStatus.getModificationTime() == localCoprocessorFile.lastModified()) {
            uploadPath = fileStatus.getPath();
            break;
        }
        String filename = fileStatus.getPath().toString();
        if (filename.endsWith(".jar")) {
            oldJarPaths.add(filename);
        }
    }

    // upload if not existing
    if (uploadPath == null) {
        // figure out a unique new jar file name
        Set<String> oldJarNames = new HashSet<String>();
        for (String path : oldJarPaths) {
            oldJarNames.add(new Path(path).getName());
        }
        String baseName = getBaseFileName(localCoprocessorJar);
        String newName = null;
        int i = 0;
        while (newName == null) {
            newName = baseName + "-" + (i++) + ".jar";
            if (oldJarNames.contains(newName))
                newName = null;
        }

        // upload
        uploadPath = new Path(coprocessorDir, newName);
        FileInputStream in = null;
        FSDataOutputStream out = null;
        try {
            in = new FileInputStream(localCoprocessorFile);
            out = fileSystem.create(uploadPath);
            IOUtils.copy(in, out);
        } finally {
            IOUtils.closeQuietly(in);
            IOUtils.closeQuietly(out);
        }

        // Mirror the local modification time so the equality check above
        // matches on subsequent deployments.
        fileSystem.setTimes(uploadPath, localCoprocessorFile.lastModified(), -1);
    }

    uploadPath = uploadPath.makeQualified(fileSystem.getUri(), null);
    return uploadPath;
}
From source file:org.apache.kylin.storage.hbase.util.DeployCoprocessorCLI.java
License:Apache License
private static boolean isSame(File localCoprocessorFile, FileStatus fileStatus) {
    return fileStatus.getLen() == localCoprocessorFile.length()
            && fileStatus.getModificationTime() == localCoprocessorFile.lastModified();
}
From source file:org.apache.lens.server.query.QueryResultPurger.java
License:Apache License
private boolean canBePurged(FileStatus f, DateUtil.TimeDiff retention) {
    return f.getModificationTime() < retention.negativeOffsetFrom(Calendar.getInstance().getTime()).getTime();
}
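Assuming TimeDiff.negativeOffsetFrom subtracts the configured retention period from the given date, the check amounts to "modification time older than the retention cutoff". A minimal plain-milliseconds sketch of the same logic (retentionMillis is a hypothetical stand-in for the TimeDiff):

// Equivalent cutoff check with plain milliseconds; `retentionMillis`
// is a hypothetical replacement for DateUtil.TimeDiff.
private boolean canBePurged(FileStatus f, long retentionMillis) {
    long cutoff = System.currentTimeMillis() - retentionMillis;
    return f.getModificationTime() < cutoff; // older than the retention window
}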
From source file:org.apache.lucene.cassandra.HadoopFile.java
License:Apache License
public long lastModified() {
    logger.info("lastModified()");
    FileStatus[] status;
    try {
        status = thePrivateFile.listStatus(path);
        // listStatus() on a file path yields a single-element array, so this
        // returns the modification time of the first (and only) entry.
        for (FileStatus file : status) {
            return file.getModificationTime();
        }
    } catch (FileNotFoundException e) {
        e.printStackTrace();
    } catch (IOException e) {
        e.printStackTrace();
    }
    // Mirror java.io.File#lastModified(): return 0 when the time is unknown.
    return 0;
}
From source file:org.apache.manifoldcf.crawler.connectors.hdfs.HDFSRepositoryConnector.java
License:Apache License
/** Process a set of documents.
 * This is the method that should cause each document to be fetched, processed, and the results either added
 * to the queue of documents for the current job, and/or entered into the incremental ingestion manager.
 * The document specification allows this class to filter what is done based on the job.
 * The connector will be connected before this method can be called.
 *@param documentIdentifiers is the set of document identifiers to process.
 *@param statuses are the currently-stored document versions for each document in the set of document identifiers
 * passed in above.
 *@param activities is the interface this method should use to queue up new document references
 * and ingest documents.
 *@param jobMode is an integer describing how the job is being run, whether continuous or once-only.
 *@param usesDefaultAuthority will be true only if the authority in use for these documents is the default one.
 */
@Override
public void processDocuments(String[] documentIdentifiers, IExistingVersions statuses, Specification spec,
        IProcessActivity activities, int jobMode, boolean usesDefaultAuthority)
        throws ManifoldCFException, ServiceInterruption {
    for (String documentIdentifier : documentIdentifiers) {
        String versionString;

        FileStatus fileStatus = getObject(new Path(documentIdentifier));
        if (fileStatus != null) {
            boolean isDirectory = fileStatus.isDirectory();

            if (isDirectory) {
                // If HDFS directory modify dates are transitive, as they are on Unix,
                // then getting the modify date of the current version is sufficient
                // to detect any downstream changes we need to be aware of.
                // (If this turns out to be a bad assumption, this should simply set rval[i] = "".)
                long lastModified = fileStatus.getModificationTime();
                versionString = Long.toString(lastModified);

                if (activities.checkDocumentNeedsReindexing(documentIdentifier, versionString)) {
                    // Process directory!
                    String entityReference = documentIdentifier;
                    FileStatus[] fileStatuses = getChildren(fileStatus.getPath());
                    if (fileStatuses == null) {
                        continue;
                    }
                    for (int j = 0; j < fileStatuses.length; j++) {
                        // Index with j (not j++), otherwise every other child is skipped.
                        FileStatus fs = fileStatuses[j];
                        String canonicalPath = fs.getPath().toString();
                        if (checkInclude(session.getUri().toString(), fs, canonicalPath, spec)) {
                            activities.addDocumentReference(canonicalPath, documentIdentifier, RELATIONSHIP_CHILD);
                        }
                    }
                }
            } else {
                long lastModified = fileStatus.getModificationTime();
                StringBuilder sb = new StringBuilder();

                // Check if the path is to be converted. We record that info in the version string
                // so that we'll reindex documents whose URIs change.
                String nameNode = nameNodeProtocol + "://" + nameNodeHost + ":" + nameNodePort;
                String convertPath = findConvertPath(nameNode, spec, fileStatus.getPath());
                if (convertPath != null) {
                    // Record the path.
                    sb.append("+");
                    pack(sb, convertPath, '+');
                } else {
                    sb.append("-");
                }
                sb.append(Long.toString(lastModified));
                versionString = sb.toString();

                // We will record document fetch as an activity
                long startTime = System.currentTimeMillis();
                String errorCode = null;
                String errorDesc = null;
                long fileSize = 0;

                if (activities.checkDocumentNeedsReindexing(documentIdentifier, versionString)) {
                    // Process file!
                    if (!checkIngest(session.getUri().toString(), fileStatus, spec)) {
                        activities.noDocument(documentIdentifier, versionString);
                        continue;
                    }

                    // It is a file to be indexed.
                    long fileLength = fileStatus.getLen();
                    String fileName = fileStatus.getPath().getName();
                    String mimeType = mapExtensionToMimeType(fileStatus.getPath().getName());
                    Date modifiedDate = new Date(fileStatus.getModificationTime());
                    try {
                        String uri;
                        if (convertPath != null) {
                            uri = convertToWGETURI(convertPath);
                        } else {
                            uri = fileStatus.getPath().toUri().toString();
                        }

                        if (!activities.checkLengthIndexable(fileLength)) {
                            errorCode = activities.EXCLUDED_LENGTH;
                            errorDesc = "Excluding document because of file length ('" + fileLength + "')";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }

                        if (!activities.checkURLIndexable(uri)) {
                            errorCode = activities.EXCLUDED_URL;
                            errorDesc = "Excluding document because of URL ('" + uri + "')";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }

                        if (!activities.checkMimeTypeIndexable(mimeType)) {
                            errorCode = activities.EXCLUDED_MIMETYPE;
                            errorDesc = "Excluding document because of mime type (" + mimeType + ")";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }

                        if (!activities.checkDateIndexable(modifiedDate)) {
                            errorCode = activities.EXCLUDED_DATE;
                            errorDesc = "Excluding document because of date (" + modifiedDate + ")";
                            activities.noDocument(documentIdentifier, versionString);
                            continue;
                        }

                        // Prepare the metadata part of RepositoryDocument
                        RepositoryDocument data = new RepositoryDocument();
                        data.setFileName(fileName);
                        data.setMimeType(mimeType);
                        data.setModifiedDate(modifiedDate);
                        data.addField("uri", uri);

                        BackgroundStreamThread t = new BackgroundStreamThread(getSession(), new Path(documentIdentifier));
                        try {
                            t.start();
                            boolean wasInterrupted = false;
                            try {
                                InputStream is = t.getSafeInputStream();
                                try {
                                    // Pass the actual length of the file being streamed.
                                    data.setBinary(is, fileLength);
                                    activities.ingestDocumentWithException(documentIdentifier, versionString, uri, data);
                                } finally {
                                    is.close();
                                }
                            } catch (java.net.SocketTimeoutException e) {
                                throw e;
                            } catch (InterruptedIOException e) {
                                wasInterrupted = true;
                                throw e;
                            } catch (ManifoldCFException e) {
                                if (e.getErrorCode() == ManifoldCFException.INTERRUPTED) {
                                    wasInterrupted = true;
                                }
                                throw e;
                            } finally {
                                if (!wasInterrupted) {
                                    // This does a join
                                    t.finishUp();
                                }
                            }

                            // No errors. Record the fact that we made it.
                            errorCode = "OK";
                            // Length we did in bytes
                            fileSize = fileStatus.getLen();
                        } catch (InterruptedException e) {
                            // We were interrupted out of the join, most likely. Before we abandon the thread,
                            // send a courtesy interrupt.
                            t.interrupt();
                            throw new ManifoldCFException("Interrupted: " + e.getMessage(), e,
                                    ManifoldCFException.INTERRUPTED);
                        } catch (java.net.SocketTimeoutException e) {
                            errorCode = "IOERROR";
                            errorDesc = e.getMessage();
                            handleIOException(e);
                        } catch (InterruptedIOException e) {
                            t.interrupt();
                            throw new ManifoldCFException("Interrupted: " + e.getMessage(), e,
                                    ManifoldCFException.INTERRUPTED);
                        } catch (IOException e) {
                            errorCode = "IOERROR";
                            errorDesc = e.getMessage();
                            handleIOException(e);
                        }
                    } finally {
                        if (errorCode != null) {
                            activities.recordActivity(new Long(startTime), ACTIVITY_READ, new Long(fileSize),
                                    documentIdentifier, errorCode, errorDesc, null);
                        }
                    }
                }
            }
        } else {
            activities.deleteDocument(documentIdentifier);
            continue;
        }
    }
}
From source file:org.apache.metron.maas.service.callback.LaunchContainer.java
License:Apache License
private Map.Entry<String, LocalResource> localizeResource(FileStatus status) {
    URL url = ConverterUtils.getYarnUrlFromURI(status.getPath().toUri());
    LocalResource resource = LocalResource.newInstance(url, LocalResourceType.FILE,
            LocalResourceVisibility.APPLICATION, status.getLen(), status.getModificationTime());
    String name = status.getPath().getName();
    return new AbstractMap.SimpleEntry<>(name, resource);
}