Example usage for org.apache.hadoop.fs FileStatus getModificationTime

List of usage examples for org.apache.hadoop.fs FileStatus getModificationTime

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileStatus.getModificationTime().

Prototype

public long getModificationTime() 

Document

Get the modification time of the file.
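
For orientation, here is a minimal, self-contained sketch (not taken from the examples below; the path /tmp/example.txt is only an illustrative placeholder) that reads a file's modification time and prints it as a date:

import java.util.Date;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModificationTimeExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        // getModificationTime() returns milliseconds since the epoch,
        // comparable to System.currentTimeMillis().
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt")); // placeholder path
        long modificationTime = status.getModificationTime();

        System.out.println("Last modified: " + new Date(modificationTime));
    }
}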

Usage

From source file:hws.core.ContainerUtils.java

License:Apache License

public static void setupContainerJar(FileSystem fs, Path jarPath, LocalResource containerJar)
        throws IOException {
    FileStatus jarStat = fs.getFileStatus(jarPath);
    containerJar.setResource(ConverterUtils.getYarnUrlFromPath(jarPath));
    containerJar.setSize(jarStat.getLen());
    containerJar.setTimestamp(jarStat.getModificationTime());
    containerJar.setType(LocalResourceType.FILE);
    containerJar.setVisibility(LocalResourceVisibility.PUBLIC);
}
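
The populated LocalResource is typically registered with a container launch context so YARN can localize the jar and verify its size and timestamp. A brief sketch of that continuation follows; it is an assumption for illustration, not part of the hws.core source, and the resource name "container.jar" is hypothetical.

// Hypothetical continuation of the example above.
LocalResource containerJar = Records.newRecord(LocalResource.class);
ContainerUtils.setupContainerJar(fs, jarPath, containerJar);

Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
localResources.put("container.jar", containerJar); // hypothetical resource name

ContainerLaunchContext launchContext = Records.newRecord(ContainerLaunchContext.class);
launchContext.setLocalResources(localResources);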

From source file:io.amient.yarn1.YarnContainerContext.java

License:Open Source License

private void prepareLocalResourceFile(Map<String, LocalResource> localResources, String fileName,
        String remoteFileName, FileSystem distFs) throws IOException {
    final Path dst = new Path(distFs.getHomeDirectory(), remoteFileName);
    FileStatus scFileStatus = distFs.getFileStatus(dst);
    final URL yarnUrl = ConverterUtils.getYarnUrlFromURI(dst.toUri());
    LocalResource scRsrc = LocalResource.newInstance(yarnUrl, LocalResourceType.FILE,
            LocalResourceVisibility.APPLICATION, scFileStatus.getLen(), scFileStatus.getModificationTime());
    localResources.put(fileName, scRsrc);
}

From source file:io.aos.hdfs.basics.AbstractHdfsFileTest.java

License:Apache License

private void testGetLastModificationDateHdfsFile() throws IOException {
    getLogger().info("Testing HDFS file modification date.");
    FileStatus fileStatus = getFileSystem().getFileStatus(HDFS_FILE_1);
    long modificationTime = fileStatus.getModificationTime();
    // assertNotSame compares object identity and could never fail here; check the value instead.
    Assert.assertTrue("modification time should be set", modificationTime > 0);
}

From source file:io.aos.hdfs.basics.AbstractHdfsFileTest.java

License:Apache License

private void test6() throws IOException {
    Path path = new Path("fileName");
    FileStatus fileStatus = getFileSystem().getFileStatus(path);
    long modificationTime = fileStatus.getModificationTime();
}

From source file:io.aos.hdfs.ShowFileStatusTest.java

License:Apache License

@Test
public void fileStatusForFile() throws IOException {
    Path file = new Path("/dir/file");
    FileStatus stat = fs.getFileStatus(file);
    assertThat(stat.getPath().toUri().getPath(), is("/dir/file"));
    assertThat(stat.isDir(), is(false));
    assertThat(stat.getLen(), is(7L));
    assertThat(stat.getModificationTime(), is(lessThanOrEqualTo(System.currentTimeMillis())));
    assertThat(stat.getReplication(), is((short) 1));
    assertThat(stat.getBlockSize(), is(64 * 1024 * 1024L));
    assertThat(stat.getOwner(), is("tom"));
    assertThat(stat.getGroup(), is("supergroup"));
    assertThat(stat.getPermission().toString(), is("rw-r--r--"));
}

From source file:io.aos.hdfs.ShowFileStatusTest.java

License:Apache License

@Test
public void fileStatusForDirectory() throws IOException {
    Path dir = new Path("/dir");
    FileStatus stat = fs.getFileStatus(dir);
    assertThat(stat.getPath().toUri().getPath(), is("/dir"));
    assertThat(stat.isDir(), is(true));
    assertThat(stat.getLen(), is(0L));
    assertThat(stat.getModificationTime(), is(lessThanOrEqualTo(System.currentTimeMillis())));
    assertThat(stat.getReplication(), is((short) 0));
    assertThat(stat.getBlockSize(), is(0L));
    assertThat(stat.getOwner(), is("tom"));
    assertThat(stat.getGroup(), is("supergroup"));
    assertThat(stat.getPermission().toString(), is("rwxr-xr-x"));
}
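
Both ShowFileStatusTest methods above use a FileSystem field named fs that is not shown in the excerpt. A plausible setup, sketched here as an assumption rather than taken from the source, starts a MiniDFSCluster and writes a 7-byte file at /dir/file, which is what makes the length and path assertions hold:

private MiniDFSCluster cluster;
private FileSystem fs;

@Before
public void setUp() throws IOException {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    fs = cluster.getFileSystem();
    // "content" is 7 bytes, matching the getLen() assertion in fileStatusForFile().
    OutputStream out = fs.create(new Path("/dir/file"));
    out.write("content".getBytes("UTF-8"));
    out.close();
}

@After
public void tearDown() throws IOException {
    if (fs != null) {
        fs.close();
    }
    if (cluster != null) {
        cluster.shutdown();
    }
}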

From source file:io.apigee.lembos.node.types.DistributedCacheWrap.java

License:Apache License

/**
 * Java wrapper for {@link DistributedCache#getFileStatus(Configuration, URI)}.
 *
 * @param ctx the JavaScript context
 * @param thisObj the 'this' object
 * @param args the function arguments
 * @param func the function being called
 *
 * @return a JavaScript object containing the file status fields
 */
@JSStaticFunction
public static Object getFileStatus(final Context ctx, final Scriptable thisObj, final Object[] args,
        final Function func) {
    final Object arg0 = args.length >= 1 ? args[0] : Undefined.instance;
    final Object arg1 = args.length >= 2 ? args[1] : Undefined.instance;

    if (args.length < 2) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.TWO_ARGS_EXPECTED);
    } else if (!JavaScriptUtils.isDefined(arg0)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.FIRST_ARG_REQUIRED);
    } else if (!JavaScriptUtils.isDefined(arg1)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.SECOND_ARG_REQUIRED);
    } else if (!(arg0 instanceof ConfigurationWrap)) {
        throw Utils.makeError(ctx, thisObj, LembosMessages.FIRST_ARG_MUST_BE_CONF);
    }

    final URI hdfsUri = URI.create(arg1.toString());
    FileStatus status;

    try {
        status = DistributedCache.getFileStatus(((ConfigurationWrap) arg0).getConf(), hdfsUri);
    } catch (IOException e) {
        throw Utils.makeError(ctx, thisObj, e.getMessage());
    }

    if (status == null) {
        throw Utils.makeError(ctx, thisObj, "Unable to get file status for HDFS uri: " + hdfsUri.toString());
    }

    final Scriptable jsStatus = ctx.newObject(thisObj);

    ScriptableObject.defineProperty(jsStatus, "accessTime", status.getAccessTime(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "blockSize", status.getBlockSize(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "group", status.getGroup(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "len", status.getLen(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "modificationTime", status.getModificationTime(),
            ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "owner", status.getOwner(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "path", status.getPath().toString(), ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "permission", status.getPermission().toString(),
            ScriptableObject.READONLY);
    ScriptableObject.defineProperty(jsStatus, "replication", status.getReplication(),
            ScriptableObject.READONLY);

    return jsStatus;
}

From source file:io.dataapps.chlorine.hadoop.NewFilesFilter.java

License:Apache License

@Override
public boolean accept(Path path) {
    try {
        FileSystem fs = FileSystem.get(conf);
        if (fs.isDirectory(path)) {
            return true;
        }
        FileStatus file = fs.getFileStatus(path);
        long time = file.getModificationTime();
        boolean result = (time > cutOffTime);
        return result;
    } catch (IOException e) {
        LOG.error(e);
        return true;
    }
}
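
A short usage sketch for this PathFilter, given as an assumption for illustration (the constructor arguments and the /data/incoming path are hypothetical, not taken from the chlorine source): passing the filter to FileSystem.listStatus returns only the entries whose modification time is newer than the cutoff, plus directories, which accept() always lets through.

// Hypothetical usage: list only files modified within the last 24 hours.
FileSystem fs = FileSystem.get(conf);
Path inputDir = new Path("/data/incoming"); // hypothetical input directory
long cutOffTime = System.currentTimeMillis() - 24L * 60 * 60 * 1000;

FileStatus[] recent = fs.listStatus(inputDir, new NewFilesFilter(conf, cutOffTime)); // assumed constructor
for (FileStatus status : recent) {
    System.out.println(status.getPath() + " modified at " + status.getModificationTime());
}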

From source file:io.druid.indexer.JobHelper.java

License:Apache License

/**
 * Rename the files. This works around some limitations of both FileContext (no s3n support) and NativeS3FileSystem.rename
 * which will not overwrite.
 *
 * @param outputFS              The output fs
 * @param indexZipFilePath      The original file path
 * @param finalIndexZipFilePath The path to rename the original file to
 *
 * @return False if a rename failed, true otherwise (rename success or no rename needed)
 */
private static boolean renameIndexFiles(final FileSystem outputFS, final Path indexZipFilePath,
        final Path finalIndexZipFilePath) {
    try {
        return RetryUtils.retry(new Callable<Boolean>() {
            @Override
            public Boolean call() throws Exception {
                final boolean needRename;

                if (outputFS.exists(finalIndexZipFilePath)) {
                    // NativeS3FileSystem.rename won't overwrite, so we might need to delete the old index first
                    final FileStatus zipFile = outputFS.getFileStatus(indexZipFilePath);
                    final FileStatus finalIndexZipFile = outputFS.getFileStatus(finalIndexZipFilePath);

                    if (zipFile.getModificationTime() >= finalIndexZipFile.getModificationTime()
                            || zipFile.getLen() != finalIndexZipFile.getLen()) {
                        log.info("File[%s / %s / %sB] existed, but wasn't the same as [%s / %s / %sB]",
                                finalIndexZipFile.getPath(),
                                new DateTime(finalIndexZipFile.getModificationTime()),
                                finalIndexZipFile.getLen(), zipFile.getPath(),
                                new DateTime(zipFile.getModificationTime()), zipFile.getLen());
                        outputFS.delete(finalIndexZipFilePath, false);
                        needRename = true;
                    } else {
                        log.info("File[%s / %s / %sB] existed and will be kept", finalIndexZipFile.getPath(),
                                new DateTime(finalIndexZipFile.getModificationTime()),
                                finalIndexZipFile.getLen());
                        needRename = false;
                    }
                } else {
                    needRename = true;
                }

                if (needRename) {
                    log.info("Attempting rename from [%s] to [%s]", indexZipFilePath, finalIndexZipFilePath);
                    return outputFS.rename(indexZipFilePath, finalIndexZipFilePath);
                } else {
                    return true;
                }
            }
        }, FileUtils.IS_EXCEPTION, NUM_RETRIES);
    } catch (Exception e) {
        throw Throwables.propagate(e);
    }
}

From source file:io.druid.indexer.path.GranularUnprocessedPathSpec.java

License:Apache License

@Override
public Job addInputPaths(HadoopDruidIndexerConfig config, Job job) throws IOException {
    // This PathSpec breaks so many abstractions that we might as well break some more
    Preconditions.checkState(config.getGranularitySpec() instanceof UniformGranularitySpec,
            String.format("Cannot use %s without %s", GranularUnprocessedPathSpec.class.getSimpleName(),
                    UniformGranularitySpec.class.getSimpleName()));

    final Path betaInput = new Path(getInputPath());
    final FileSystem fs = betaInput.getFileSystem(job.getConfiguration());
    final Granularity segmentGranularity = config.getGranularitySpec().getSegmentGranularity();

    Map<DateTime, Long> inputModifiedTimes = new TreeMap<>(Comparators.inverse(Comparators.comparable()));

    for (FileStatus status : FSSpideringIterator.spiderIterable(fs, betaInput)) {
        final DateTime key = segmentGranularity.toDate(status.getPath().toString());
        final Long currVal = inputModifiedTimes.get(key);
        final long mTime = status.getModificationTime();

        inputModifiedTimes.put(key, currVal == null ? mTime : Math.max(currVal, mTime));
    }

    Set<Interval> bucketsToRun = Sets.newTreeSet(Comparators.intervals());
    for (Map.Entry<DateTime, Long> entry : inputModifiedTimes.entrySet()) {
        DateTime timeBucket = entry.getKey();
        long mTime = entry.getValue();

        String bucketOutput = String.format("%s/%s", config.getSchema().getIOConfig().getSegmentOutputPath(),
                segmentGranularity.toPath(timeBucket));
        for (FileStatus fileStatus : FSSpideringIterator.spiderIterable(fs, new Path(bucketOutput))) {
            if (fileStatus.getModificationTime() > mTime) {
                bucketsToRun.add(new Interval(timeBucket, segmentGranularity.increment(timeBucket)));
                break;
            }
        }

        if (bucketsToRun.size() >= maxBuckets) {
            break;
        }
    }

    config.setGranularitySpec(new UniformGranularitySpec(segmentGranularity,
            config.getGranularitySpec().getQueryGranularity(), Lists.newArrayList(bucketsToRun)));

    return super.addInputPaths(config, job);
}