Example usage for org.apache.hadoop.fs FileStatus getModificationTime

Introduction

On this page you can find example usage of org.apache.hadoop.fs.FileStatus.getModificationTime.

Prototype

public long getModificationTime() 

Document

Get the modification time of the file.
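For orientation before the collected examples, here is a minimal self-contained sketch of calling getModificationTime directly. The configuration, path, and class name below are illustrative placeholders, not taken from any of the source files listed on this page.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModificationTimeExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Placeholder path; point this at a file that exists on your cluster
        Path path = new Path("/tmp/example.txt");
        FileStatus status = fs.getFileStatus(path);
        // Modification time is expressed in milliseconds since the epoch
        long modificationTime = status.getModificationTime();
        System.out.println(path + " was last modified at " + modificationTime);
    }
}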

Usage

From source file:com.jkoolcloud.tnt4j.streams.inputs.HdfsFileLineStreamTest.java

License:Apache License

@Test()
public void test() throws Exception {
    FileSystem fs = mock(FileSystem.class);
    HdfsFileLineStream stream = new HdfsFileLineStream();

    TestFileList files = new TestFileList(false);

    final String fileName = ("file:////" + files.get(0).getParentFile() + File.separator + files.getPrefix() // NON-NLS
            + "*.TST").replace("\\", "/"); // NON-NLS

    Map<String, String> props = new HashMap<>(2);
    props.put(StreamProperties.PROP_FILENAME, fileName);
    props.put(StreamProperties.PROP_RESTORE_STATE, "false"); // NON-NLS

    when(fs.open(any(Path.class))).thenReturn(new FSDataInputStream(new TestInputStreamStub()));
    final FileStatus fileStatusMock = mock(FileStatus.class);
    final FileStatus[] array = new FileStatus[10];
    Arrays.fill(array, fileStatusMock);
    when(fs.listStatus(any(Path.class), any(PathFilter.class))).thenReturn(array);
    when(fileStatusMock.getModificationTime()).thenReturn(1L, 2L, 3L);
    when(fileStatusMock.getPath()).thenReturn(mock(Path.class));
    when(fs.getContentSummary(any(Path.class))).thenReturn(mock(ContentSummary.class));

    Method m = FileSystem.class.getDeclaredMethod("addFileSystemForTesting", URI.class, Configuration.class, // NON-NLS
            FileSystem.class);
    m.setAccessible(true);
    m.invoke(FileSystem.class, URI.create(fileName), new Configuration(), fs);

    StreamThread st = mock(StreamThread.class);
    st.setName("HdfsFileLineStreamTestThreadName"); // NON-NLS
    stream.setOwnerThread(st);

    stream.setProperties(props.entrySet());
    stream.startStream();

    verify(fileStatusMock, atLeastOnce()).getModificationTime();
    verify(fileStatusMock, atLeastOnce()).getPath();
    verify(fs, atLeastOnce()).listStatus(any(Path.class), any(PathFilter.class));

    stream.cleanup();
}

From source file:com.kylinolap.dict.lookup.FileTable.java

License:Apache License

@Override
public TableSignature getSignature() throws IOException {
    FileSystem fs = HadoopUtil.getFileSystem(path);
    FileStatus status = fs.getFileStatus(new Path(path));
    return new TableSignature(path, status.getLen(), status.getModificationTime());
}

From source file:com.kylinolap.job.tools.DeployCoprocessorCLI.java

License:Apache License

public static Path getNewestCoprocessorJar(KylinConfig config, FileSystem fileSystem) throws IOException {
    Path coprocessorDir = getCoprocessorHDFSDir(fileSystem, config);
    FileStatus newestJar = null;
    for (FileStatus fileStatus : fileSystem.listStatus(coprocessorDir)) {
        if (fileStatus.getPath().toString().endsWith(".jar")) {
            if (newestJar == null) {
                newestJar = fileStatus;
            } else {
                if (newestJar.getModificationTime() < fileStatus.getModificationTime())
                    newestJar = fileStatus;
            }
        }
    }
    if (newestJar == null)
        return null;

    Path path = newestJar.getPath().makeQualified(fileSystem.getUri(), null);
    logger.info("The newest coprocessor is " + path.toString());
    return path;
}

From source file:com.kylinolap.job.tools.DeployCoprocessorCLI.java

License:Apache License

public static Path uploadCoprocessorJar(String localCoprocessorJar, FileSystem fileSystem,
        Set<String> oldJarPaths) throws IOException {
    Path uploadPath = null;
    File localCoprocessorFile = new File(localCoprocessorJar);

    // check existing jars
    if (oldJarPaths == null) {
        oldJarPaths = new HashSet<String>();
    }
    Path coprocessorDir = getCoprocessorHDFSDir(fileSystem, KylinConfig.getInstanceFromEnv());
    for (FileStatus fileStatus : fileSystem.listStatus(coprocessorDir)) {
        // compare against the local file's size, not the path string's length
        if (fileStatus.getLen() == localCoprocessorFile.length()
                && fileStatus.getModificationTime() == localCoprocessorFile.lastModified()) {
            uploadPath = fileStatus.getPath();
            break;
        }
        String filename = fileStatus.getPath().toString();
        if (filename.endsWith(".jar")) {
            oldJarPaths.add(filename);
        }
    }

    // upload if not existing
    if (uploadPath == null) {
        // figure out a unique new jar file name
        Set<String> oldJarNames = new HashSet<String>();
        for (String path : oldJarPaths) {
            oldJarNames.add(new Path(path).getName());
        }
        String baseName = getBaseFileName(localCoprocessorJar);
        String newName = null;
        int i = 0;
        while (newName == null) {
            newName = baseName + "-" + (i++) + ".jar";
            if (oldJarNames.contains(newName))
                newName = null;
        }

        // upload
        uploadPath = new Path(coprocessorDir, newName);
        FileInputStream in = null;
        FSDataOutputStream out = null;
        try {
            in = new FileInputStream(localCoprocessorFile);
            out = fileSystem.create(uploadPath);
            IOUtils.copy(in, out);
        } finally {
            IOUtils.closeQuietly(in);
            IOUtils.closeQuietly(out);
        }

        fileSystem.setTimes(uploadPath, localCoprocessorFile.lastModified(), System.currentTimeMillis());

    }

    uploadPath = uploadPath.makeQualified(fileSystem.getUri(), null);
    return uploadPath;
}

From source file:com.linkedin.hadoop.jobs.HdfsWaitJob.java

License:Apache License

/**
 * Loops through the folders pointed to by dirPath and causes the job to
 * succeed if any of those folders is fresh enough. However, if the
 * parameter checkExactPath is true, this method only checks for the
 * existence of dirPath in HDFS.
 *
 * @param dirPath The path to the directory searched for fresh folders
 * @param freshness The timeframe within which a folder must have been modified to count as fresh
 * @param checkExactPath Whether to only check for the existence of dirPath in HDFS
 * @throws IOException If there is an HDFS exception
 * @return A boolean value corresponding to whether a fresh folder was found
 */
public boolean checkDirectory(String dirPath, long freshness, boolean checkExactPath)
        throws IOException, NullPointerException {
    FileSystem fileSys = FileSystem.get(getConf());

    if (fileSys == null) {
        String errMessage = "ERROR: The file system trying to be accessed does not exist. JOB TERMINATED.";
        log.info(errMessage);
        throw new NullPointerException(errMessage);
    }

    if (checkExactPath) {
        if (fileSys.exists(new Path(dirPath))) {
            log.info("SUCCESS: The exact path: " + dirPath + " was found in HDFS. Program now quitting.");
            return true;
        }
        log.info("STATUS: The exact path: " + dirPath + " was not found during latest polling.");
        return false;
    }

    FileStatus[] status = fileSys.listStatus(new Path(dirPath));

    if (status == null) {
        String errMessage = "ERROR: dirPath -> " + dirPath + " is empty or does not exist. JOB TERMINATED.";
        log.info(errMessage);
        throw new IOException(errMessage);
    }

    for (FileStatus file : status) {
        if (file.isDirectory()) {
            long timeModified = file.getModificationTime();
            if ((System.currentTimeMillis() - timeModified) <= freshness) {
                String fileName = file.getPath().toString();
                log.info("We found this fresh folder in the filePath: "
                        + fileName.substring(fileName.lastIndexOf("/") + 1));
                log.info("SUCCESS: Program now quitting after successfully finding a fresh folder.");
                return true;
            }
        }
    }
    log.info("STATUS: No fresh folders found during latest polling.");
    return false;
}

From source file:com.lithium.flow.filer.HdfsFiler.java

License:Apache License

private Record getRecordForStatus(@Nonnull FileStatus status, @Nonnull String parent) {
    String name = status.getPath().getName();
    long time = status.getModificationTime();
    long size = status.getLen();
    boolean directory = status.isDirectory();
    return new Record(getUri(), parent, name, time, size, directory);
}

From source file:com.metamx.druid.indexer.path.GranularUnprocessedPathSpec.java

License:Open Source License

@Override
public Job addInputPaths(HadoopDruidIndexerConfig config, Job job) throws IOException {
    // This PathSpec breaks so many abstractions that we might as well break some more
    Preconditions.checkState(config.getGranularitySpec() instanceof UniformGranularitySpec,
            String.format("Cannot use %s without %s", GranularUnprocessedPathSpec.class.getSimpleName(),
                    UniformGranularitySpec.class.getSimpleName()));

    final Path betaInput = new Path(getInputPath());
    final FileSystem fs = betaInput.getFileSystem(job.getConfiguration());
    final Granularity segmentGranularity = ((UniformGranularitySpec) config.getGranularitySpec())
            .getGranularity();

    Map<DateTime, Long> inputModifiedTimes = new TreeMap<DateTime, Long>(
            Comparators.inverse(Comparators.<Comparable>comparable()));

    for (FileStatus status : FSSpideringIterator.spiderIterable(fs, betaInput)) {
        final DateTime key = segmentGranularity.toDate(status.getPath().toString());
        final Long currVal = inputModifiedTimes.get(key);
        final long mTime = status.getModificationTime();

        inputModifiedTimes.put(key, currVal == null ? mTime : Math.max(currVal, mTime));
    }

    Set<Interval> bucketsToRun = Sets.newTreeSet(Comparators.intervals());
    for (Map.Entry<DateTime, Long> entry : inputModifiedTimes.entrySet()) {
        DateTime timeBucket = entry.getKey();
        long mTime = entry.getValue();

        String bucketOutput = String.format("%s/%s", config.getSegmentOutputDir(),
                segmentGranularity.toPath(timeBucket));
        for (FileStatus fileStatus : FSSpideringIterator.spiderIterable(fs, new Path(bucketOutput))) {
            if (fileStatus.getModificationTime() > mTime) {
                bucketsToRun.add(new Interval(timeBucket, segmentGranularity.increment(timeBucket)));
                break;
            }
        }

        if (bucketsToRun.size() >= maxBuckets) {
            break;
        }
    }

    config.setGranularitySpec(new UniformGranularitySpec(segmentGranularity, Lists.newArrayList(bucketsToRun)));

    return super.addInputPaths(config, job);
}

From source file:com.nearinfinity.blur.store.hdfs.HdfsDirectory.java

License:Apache License

@Override
public long fileModified(String name) throws IOException {
    name = getRealName(name);
    if (!fileExists(name)) {
        throw new FileNotFoundException(name);
    }
    FileStatus fileStatus = getFileSystem().getFileStatus(new Path(_hdfsDirPath, name));
    return fileStatus.getModificationTime();
}

From source file:com.nearinfinity.mele.store.hdfs.HdfsDirectory.java

License:Apache License

@Override
public long fileModified(String name) throws IOException {
    FileStatus fileStatus = fileSystem.getFileStatus(new Path(hdfsDirPath, name));
    return fileStatus.getModificationTime();
}

From source file:com.netflix.suro.sink.localfile.LocalFileSink.java

License:Apache License

/**
 * Lists all files under the directory. If a file is marked as done, the
 * notice for that file is sent. Otherwise, if the file was not closed
 * properly, it is marked as done and the notice is sent; such a file
 * would cause an EOFException when read.
 *
 * @param dir the directory to scan; a trailing slash is appended if missing
 * @param fetchAll whether to also count files that are not yet marked as done
 * @return the number of files found in the directory
 */
public int cleanUp(String dir, boolean fetchAll) {
    if (!dir.endsWith("/")) {
        dir += "/";
    }

    int count = 0;

    try {
        FileSystem fs = writer.getFS();
        FileStatus[] files = fs.listStatus(new Path(dir));
        for (FileStatus file : files) {
            if (file.getLen() > 0) {
                String fileName = file.getPath().getName();
                String fileExt = getFileExt(fileName);
                if (fileExt != null && fileExt.equals(done)) {
                    notice.send(dir + fileName);
                    ++count;
                } else if (fileExt != null) {
                    long lastPeriod = new DateTime().minus(rotationPeriod).minus(rotationPeriod).getMillis();
                    if (file.getModificationTime() < lastPeriod) {
                        ++errorClosedFiles;
                        DynamicCounter.increment("closedFileError");
                        log.error(dir + fileName + " is not closed properly!!!");
                        String doneFile = fileName.replace(fileExt, done);
                        writer.setDone(dir + fileName, dir + doneFile);
                        notice.send(dir + doneFile);
                        ++count;
                    } else if (fetchAll) {
                        ++count;
                    }
                }
            }
        }
    } catch (Exception e) {
        log.error("Exception while on cleanUp: " + e.getMessage(), e);
        return Integer.MAX_VALUE; // return non-zero value
    }

    return count;
}