Example usage for org.apache.hadoop.fs LocatedFileStatus getModificationTime

Introduction

On this page you can find example usage for org.apache.hadoop.fs.LocatedFileStatus.getModificationTime().

Prototype

public long getModificationTime() 

Document

Get the modification time of the file.
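
The value is returned as milliseconds since the epoch. Below is a minimal, self-contained sketch of reading it while listing a directory; the directory path is an illustrative assumption, not taken from the examples that follow.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ModificationTimeExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path dir = new Path("/tmp/example"); // hypothetical directory
        FileSystem fs = dir.getFileSystem(conf);
        RemoteIterator<LocatedFileStatus> it = fs.listFiles(dir, true);
        while (it.hasNext()) {
            LocatedFileStatus status = it.next();
            // getModificationTime() returns milliseconds since the epoch.
            System.out.println(status.getPath() + " -> " + status.getModificationTime());
        }
    }
}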

Usage

From source file: com.alibaba.jstorm.hdfs.common.HdfsUtils.java

License: Apache License

/** Lists files, sorted by modification time, that have not been modified since 'olderThan'. If
 * 'olderThan' is <= 0, the filtering is disabled. */
public static ArrayList<Path> listFilesByModificationTime(FileSystem fs, Path directory, long olderThan)
        throws IOException {
    ArrayList<LocatedFileStatus> fstats = new ArrayList<>();

    RemoteIterator<LocatedFileStatus> itr = fs.listFiles(directory, false);
    while (itr.hasNext()) {
        LocatedFileStatus fileStatus = itr.next();
        if (olderThan > 0) {
            if (fileStatus.getModificationTime() <= olderThan)
                fstats.add(fileStatus);
        } else {
            fstats.add(fileStatus);
        }
    }
    Collections.sort(fstats, new ModifTimeComparator());

    ArrayList<Path> result = new ArrayList<>(fstats.size());
    for (LocatedFileStatus fstat : fstats) {
        result.add(fstat.getPath());
    }
    return result;
}
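
The example above sorts with a ModifTimeComparator that is not shown on this page. A minimal sketch of such a comparator, assuming it orders files by ascending modification time (the actual class in the project may differ):

import java.util.Comparator;

import org.apache.hadoop.fs.FileStatus;

public class ModifTimeComparator implements Comparator<FileStatus> {
    @Override
    public int compare(FileStatus a, FileStatus b) {
        // Oldest files first (ascending modification time).
        return Long.compare(a.getModificationTime(), b.getModificationTime());
    }
}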

From source file: com.toy.TomcatContainerRunnable.java

License: Apache License

@Override
public void run() {
    LOG.info("Setting up Tomcat container launch for container id {} / war {}", container.getId(), war);
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    // Set the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
    try {
        final RemoteIterator<LocatedFileStatus> libs = fs.listFiles(path, false);
        while (libs.hasNext()) {
            final LocatedFileStatus next = libs.next();
            LOG.debug("Register {} for container", next.getPath());
            LocalResource lib = Records.newRecord(LocalResource.class);
            lib.setType(LocalResourceType.FILE);
            lib.setVisibility(LocalResourceVisibility.APPLICATION);
            lib.setResource(ConverterUtils.getYarnUrlFromURI(next.getPath().toUri()));
            lib.setTimestamp(next.getModificationTime());
            lib.setSize(next.getLen());
            localResources.put(next.getPath().getName(), lib);
        }
        ctx.setLocalResources(localResources);
    } catch (IOException e) {
        LOG.error("Error while fetching Tomcat libraries : {}", e.getLocalizedMessage(), e);
    }

    // Build classpath
    StringBuilder classPathEnv = new StringBuilder(ApplicationConstants.Environment.CLASSPATH.$())
            .append(File.pathSeparatorChar).append("./*");
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    Map<String, String> env = new HashMap<String, String>();
    env.put("CLASSPATH", classPathEnv.toString());
    env.put(Constants.WAR, war);
    env.put(Constants.ZOOKEEPER_QUORUM, System.getenv(Constants.ZOOKEEPER_QUORUM));
    ctx.setEnvironment(env);

    // Set the necessary command to execute the application master
    Vector<CharSequence> vargs = new Vector<CharSequence>(30);
    // Set java executable command
    LOG.info("Setting up app master command");
    vargs.add(ApplicationConstants.Environment.JAVA_HOME.$() + "/bin/java");
    // Set Xmx based on am memory size
    vargs.add("-Xmx" + 32 + "m");
    vargs.add("com.toy.TomcatLauncher");
    vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/Tomcat.stdout");
    vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/Tomcat.stderr");

    // Get final command
    StringBuilder command = new StringBuilder();
    for (CharSequence str : vargs) {
        command.append(str).append(" ");
    }

    LOG.info("Completed setting up app master command " + command.toString());
    List<String> commands = new ArrayList<String>();
    commands.add(command.toString());
    ctx.setCommands(commands);

    nmClientAsync.startContainerAsync(container, ctx);
}
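
Here getModificationTime() supplies the timestamp of a YARN LocalResource; the NodeManager checks that timestamp against the file in HDFS during localization, so it should be read from the file's current status. A condensed sketch of just that registration step, with the class, method, and parameter names being illustrative:

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;

public final class LocalResourceUtil {
    /** Registers a single HDFS file as an application-scoped LocalResource. */
    public static void addFileResource(FileSystem fs, Path file,
            Map<String, LocalResource> localResources) throws IOException {
        FileStatus status = fs.getFileStatus(file);
        LocalResource resource = Records.newRecord(LocalResource.class);
        resource.setType(LocalResourceType.FILE);
        resource.setVisibility(LocalResourceVisibility.APPLICATION);
        resource.setResource(ConverterUtils.getYarnUrlFromURI(file.toUri()));
        // The timestamp should match the file's current modification time in HDFS;
        // a mismatch causes resource localization to fail.
        resource.setTimestamp(status.getModificationTime());
        resource.setSize(status.getLen());
        localResources.put(file.getName(), resource);
    }
}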

From source file: org.apache.druid.storage.hdfs.HdfsDataSegmentFinder.java

License: Apache License

@Override
public Set<DataSegment> findSegments(String workingDirPathStr, boolean updateDescriptor)
        throws SegmentLoadingException {
    final Map<String, Pair<DataSegment, Long>> timestampedSegments = new HashMap<>();
    final Path workingDirPath = new Path(workingDirPathStr);
    FileSystem fs;
    try {
        fs = workingDirPath.getFileSystem(config);

        log.info(fs.getScheme());
        log.info("FileSystem URI:" + fs.getUri().toString());

        if (!fs.exists(workingDirPath)) {
            throw new SegmentLoadingException("Working directory [%s] doesn't exist.", workingDirPath);
        }

        if (!fs.isDirectory(workingDirPath)) {
            throw new SegmentLoadingException("Working directory [%s] is not a directory!?", workingDirPath);
        }

        final RemoteIterator<LocatedFileStatus> it = fs.listFiles(workingDirPath, true);
        while (it.hasNext()) {
            final LocatedFileStatus locatedFileStatus = it.next();
            final Path path = locatedFileStatus.getPath();
            if (path.getName().endsWith("descriptor.json")) {

                // There are 3 supported path formats:
                //    - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum/descriptor.json
                //    - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_descriptor.json
                //    - hdfs://nn1/hdfs_base_directory/data_source_name/interval/version/shardNum_UUID_descriptor.json
                final String[] descriptorParts = path.getName().split("_");

                Path indexZip = new Path(path.getParent(), "index.zip");
                if (descriptorParts.length > 1) {
                    Preconditions
                            .checkState(
                                    descriptorParts.length <= 3
                                            && org.apache.commons.lang.StringUtils.isNumeric(descriptorParts[0])
                                            && "descriptor.json"
                                                    .equals(descriptorParts[descriptorParts.length - 1]),
                                    "Unexpected descriptor filename format [%s]", path);

                    indexZip = new Path(path.getParent(), StringUtils.format("%s_%sindex.zip",
                            descriptorParts[0], descriptorParts.length == 2 ? "" : descriptorParts[1] + "_"));
                }

                if (fs.exists(indexZip)) {
                    final DataSegment dataSegment = mapper.readValue(fs.open(path), DataSegment.class);
                    log.info("Found segment [%s] located at [%s]", dataSegment.getIdentifier(), indexZip);

                    final Map<String, Object> loadSpec = dataSegment.getLoadSpec();
                    final String pathWithoutScheme = indexZip.toUri().getPath();

                    if (!loadSpec.get("type").equals(HdfsStorageDruidModule.SCHEME)
                            || !loadSpec.get("path").equals(pathWithoutScheme)) {
                        loadSpec.put("type", HdfsStorageDruidModule.SCHEME);
                        loadSpec.put("path", pathWithoutScheme);
                        if (updateDescriptor) {
                            log.info("Updating loadSpec in descriptor.json at [%s] with new path [%s]", path,
                                    pathWithoutScheme);
                            mapper.writeValue(fs.create(path, true), dataSegment);
                        }
                    }

                    DataSegmentFinder.putInMapRetainingNewest(timestampedSegments, dataSegment,
                            locatedFileStatus.getModificationTime());
                } else {
                    throw new SegmentLoadingException(
                            "index.zip didn't exist at [%s] while descripter.json exists!?", indexZip);
                }
            }
        }
    } catch (IOException e) {
        throw new SegmentLoadingException(e, "Problems interacting with filesystem[%s].", workingDirPath);
    }

    return timestampedSegments.values().stream().map(x -> x.lhs).collect(Collectors.toSet());
}
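
In this example the modification time of each descriptor.json is passed to DataSegmentFinder.putInMapRetainingNewest, which (as the name suggests) retains the most recently modified entry per segment. A generic sketch of that "retain the newest entry per key" idea, using hypothetical names rather than Druid's actual API:

import java.util.HashMap;
import java.util.Map;

// Hypothetical helper, not Druid's API: keep, per key, the value whose
// modification time is largest.
final class NewestByKey<K, V> {
    private final Map<K, Long> times = new HashMap<>();
    private final Map<K, V> values = new HashMap<>();

    void put(K key, V value, long modificationTime) {
        Long existing = times.get(key);
        if (existing == null || existing < modificationTime) {
            times.put(key, modificationTime);
            values.put(key, value);
        }
    }

    Map<K, V> snapshot() {
        return new HashMap<>(values);
    }
}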

From source file: org.apache.druid.storage.hdfs.tasklog.HdfsTaskLogs.java

License: Apache License

@Override
public void killOlderThan(long timestamp) throws IOException {
    Path taskLogDir = new Path(config.getDirectory());
    FileSystem fs = taskLogDir.getFileSystem(hadoopConfig);
    if (fs.exists(taskLogDir)) {

        if (!fs.isDirectory(taskLogDir)) {
            throw new IOE("taskLogDir [%s] must be a directory.", taskLogDir);
        }

        RemoteIterator<LocatedFileStatus> iter = fs.listLocatedStatus(taskLogDir);
        while (iter.hasNext()) {
            LocatedFileStatus file = iter.next();
            if (file.getModificationTime() < timestamp) {
                Path p = file.getPath();
                log.info("Deleting hdfs task log [%s].", p.toUri().toString());
                fs.delete(p, true);
            }

            if (Thread.currentThread().isInterrupted()) {
                throw new IOException(
                        new InterruptedException("Thread interrupted. Couldn't delete all tasklogs."));
            }
        }
    }
}
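
killOlderThan() takes an absolute timestamp in milliseconds, the same unit that getModificationTime() returns. A short sketch of how a caller might derive that cutoff from a retention period; the seven-day retention and the hdfsTaskLogs instance are illustrative assumptions:

// Delete task logs not modified in the last 7 days (illustrative retention).
// Both System.currentTimeMillis() and getModificationTime() are milliseconds
// since the epoch, so the two values compare directly.
long cutoff = System.currentTimeMillis() - java.util.concurrent.TimeUnit.DAYS.toMillis(7);
hdfsTaskLogs.killOlderThan(cutoff);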

From source file: org.apache.impala.catalog.HdfsTable.java

License: Apache License

/**
 * Drops and re-loads the block metadata for all partitions in 'partsByPath' whose
 * location is under the given 'dirPath'. It involves the following steps:
 * - Clear the current block metadata of the partitions.
 * - Call FileSystem.listStatus() on 'dirPath' to fetch the BlockLocations for each
 *   file under it recursively.
 * - For every valid data file, map it to a partition from 'partsByPath' (if one exists)
 *   and enumerate all its blocks and their corresponding hosts and disk IDs.
 * Requires that 'dirPath' and all paths in 'partsByPath' have consistent qualification
 * (either fully qualified or unqualified), for isDescendantPath().
 * TODO: Split this method into more logical methods for cleaner code.
 */
private void loadBlockMetadata(Path dirPath, HashMap<Path, List<HdfsPartition>> partsByPath) {
    try {
        FileSystem fs = dirPath.getFileSystem(CONF);
        // No need to load blocks for empty partitions list.
        if (partsByPath.size() == 0 || !fs.exists(dirPath))
            return;
        if (LOG.isTraceEnabled()) {
            LOG.trace("Loading block md for " + name_ + " directory " + dirPath.toString());
        }

        // Clear the state of partitions under dirPath since they are going to be updated
        // based on the current snapshot of files in the directory.
        List<HdfsPartition> dirPathPartitions = partsByPath.get(dirPath);
        if (dirPathPartitions != null) {
            // The dirPath is a partition directory. This means the path is the root of an
            // unpartitioned table, or the path of at least one partition.
            for (HdfsPartition partition : dirPathPartitions) {
                partition.setFileDescriptors(new ArrayList<FileDescriptor>());
            }
        } else {
            // The dirPath is not a partition directory. We expect it to be an ancestor of
            // partition paths (e.g., the table root). Clear all partitions whose paths are
            // a descendant of dirPath.
            for (Map.Entry<Path, List<HdfsPartition>> entry : partsByPath.entrySet()) {
                Path partDir = entry.getKey();
                if (!FileSystemUtil.isDescendantPath(partDir, dirPath))
                    continue;
                for (HdfsPartition partition : entry.getValue()) {
                    partition.setFileDescriptors(new ArrayList<FileDescriptor>());
                }
            }
        }

        // For file systems that do not support BlockLocation API, we manually synthesize
        // block location metadata based on file formats.
        if (!FileSystemUtil.supportsStorageIds(fs)) {
            synthesizeBlockMetadata(fs, dirPath, partsByPath);
            return;
        }

        int unknownDiskIdCount = 0;
        RemoteIterator<LocatedFileStatus> fileStatusIter = fs.listFiles(dirPath, true);
        while (fileStatusIter.hasNext()) {
            LocatedFileStatus fileStatus = fileStatusIter.next();
            if (!FileSystemUtil.isValidDataFile(fileStatus))
                continue;
            // Find the partition that this file belongs to (if any).
            Path partPathDir = fileStatus.getPath().getParent();
            Preconditions.checkNotNull(partPathDir);

            List<HdfsPartition> partitions = partsByPath.get(partPathDir);
            // Skip if this file does not belong to any known partition.
            if (partitions == null) {
                if (LOG.isTraceEnabled()) {
                    LOG.trace("File " + fileStatus.getPath().toString() + " doesn't correspond "
                            + " to a known partition. Skipping metadata load for this file.");
                }
                continue;
            }
            String fileName = fileStatus.getPath().getName();
            FileDescriptor fd = new FileDescriptor(fileName, fileStatus.getLen(),
                    fileStatus.getModificationTime());
            BlockLocation[] locations = fileStatus.getBlockLocations();
            String partPathDirName = partPathDir.toString();
            for (BlockLocation loc : locations) {
                Set<String> cachedHosts = Sets.newHashSet(loc.getCachedHosts());
                // Enumerate all replicas of the block, adding any unknown hosts
                // to hostIndex_. We pick the network address from getNames() and
                // map it to the corresponding hostname from getHosts().
                List<BlockReplica> replicas = Lists.newArrayListWithExpectedSize(loc.getNames().length);
                for (int i = 0; i < loc.getNames().length; ++i) {
                    TNetworkAddress networkAddress = BlockReplica.parseLocation(loc.getNames()[i]);
                    replicas.add(new BlockReplica(hostIndex_.getIndex(networkAddress),
                            cachedHosts.contains(loc.getHosts()[i])));
                }
                FileBlock currentBlock = new FileBlock(loc.getOffset(), loc.getLength(), replicas);
                THdfsFileBlock tHdfsFileBlock = currentBlock.toThrift();
                fd.addThriftFileBlock(tHdfsFileBlock);
                unknownDiskIdCount += loadDiskIds(loc, tHdfsFileBlock);
            }
            if (LOG.isTraceEnabled()) {
                LOG.trace("Adding file md dir: " + partPathDirName + " file: " + fileName);
            }
            // Update the partitions' metadata that this file belongs to.
            for (HdfsPartition partition : partitions) {
                partition.getFileDescriptors().add(fd);
                numHdfsFiles_++;
                totalHdfsBytes_ += fd.getFileLength();
            }
        }
        if (unknownDiskIdCount > 0) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Unknown disk id count for filesystem " + fs + ":" + unknownDiskIdCount);
            }
        }
    } catch (IOException e) {
        throw new RuntimeException(
                "Error loading block metadata for directory " + dirPath.toString() + ": " + e.getMessage(), e);
    }
}

From source file: org.apache.impala.catalog.HdfsTable.java

License: Apache License

/**
 * For filesystems that don't support BlockLocation API, synthesize file blocks
 * by manually splitting the file range into fixed-size blocks.  That way, scan
 * ranges can be derived from file blocks as usual.  All synthesized blocks are given
 * an invalid network address so that the scheduler will treat them as remote.
 */
private void synthesizeBlockMetadata(FileSystem fs, Path dirPath,
        HashMap<Path, List<HdfsPartition>> partsByPath) throws IOException {
    RemoteIterator<LocatedFileStatus> fileStatusIter = fs.listFiles(dirPath, true);
    while (fileStatusIter.hasNext()) {
        LocatedFileStatus fileStatus = fileStatusIter.next();
        if (!FileSystemUtil.isValidDataFile(fileStatus))
            continue;
        Path partPathDir = fileStatus.getPath().getParent();
        Preconditions.checkNotNull(partPathDir);
        List<HdfsPartition> partitions = partsByPath.get(partPathDir);
        // Skip if this file does not belong to any known partition.
        if (partitions == null) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("File " + fileStatus.getPath().toString() + " doesn't correspond "
                        + " to a known partition. Skipping metadata load for this file.");
            }
            continue;
        }
        String fileName = fileStatus.getPath().getName();
        FileDescriptor fd = new FileDescriptor(fileName, fileStatus.getLen(), fileStatus.getModificationTime());
        Preconditions.checkState(partitions.size() > 0);
        // For the purpose of synthesizing block metadata, we assume that all partitions
        // with the same location have the same file format.
        HdfsFileFormat fileFormat = partitions.get(0).getFileFormat();
        synthesizeFdBlockMetadata(fs, fd, fileFormat);
        // Update the partitions' metadata that this file belongs to.
        for (HdfsPartition partition : partitions) {
            partition.getFileDescriptors().add(fd);
            numHdfsFiles_++;
            totalHdfsBytes_ += fd.getFileLength();
        }
    }
}