Example usage for org.apache.hadoop.fs FileStatus getModificationTime

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileStatus.getModificationTime().

Prototype

public long getModificationTime() 

Document

Get the modification time of the file, in milliseconds since the Unix epoch (January 1, 1970 UTC).
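
Before the collected examples, here is a minimal, self-contained sketch of a typical call; the path /tmp/example.txt and the class name are illustrative placeholders, not taken from the sources below.

import java.time.Instant;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ModificationTimeExample {
    public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());

        // getModificationTime() returns epoch milliseconds, so the value
        // converts directly to java.time.Instant
        FileStatus status = fs.getFileStatus(new Path("/tmp/example.txt"));
        System.out.println("Modified at: " + Instant.ofEpochMilli(status.getModificationTime()));
    }
}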

Usage

From source file:com.flipkart.fdp.migration.distcp.codec.GenericHadoopCodec.java

License:Apache License

public List<FileTuple> getFileStatusRecursive(Path path, Collection<String> excludeList)
        throws IOException, AuthenticationException {

    List<FileTuple> response = new ArrayList<FileTuple>();

    FileStatus file = fs.getFileStatus(path);
    //TODO excludeList to be checked if file (not folder) is mentioned in excludeList.
    if (file != null && file.isFile()) {
        response.add(new FileTuple(MirrorUtils.getSimplePath(file.getPath()), file.getLen(),
                file.getModificationTime()));
        return response;
    }

    FileStatus[] fstats = fs.listStatus(path);

    if (fstats != null && fstats.length > 0) {

        for (FileStatus fstat : fstats) {

            if (fstat.isDirectory() && !excludeList.contains(MirrorUtils.getSimplePath(fstat.getPath()))) {

                response.addAll(getFileStatusRecursive(fstat.getPath(), excludeList));
            } else {

                //TODO excludeList to be checked if file (not folder) is mentioned in excludeList.

                response.add(new FileTuple(MirrorUtils.getSimplePath(fstat.getPath()), fstat.getLen(),
                        fstat.getModificationTime()));
            }
        }
    }
    return response;
}
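
For comparison, a minimal sketch of the same recursive walk using Hadoop's built-in FileSystem.listFiles(path, true), available since Hadoop 2.x; imports for RemoteIterator and LocatedFileStatus from org.apache.hadoop.fs are assumed, and the comma-joined output format is illustrative:

public List<String> listAllFiles(FileSystem fs, Path path) throws IOException {
    List<String> result = new ArrayList<String>();
    // recursive=true makes listFiles walk subdirectories itself and
    // return files only, so no explicit recursion is needed
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(path, true);
    while (it.hasNext()) {
        LocatedFileStatus f = it.next();
        result.add(f.getPath() + "," + f.getLen() + "," + f.getModificationTime());
    }
    return result;
}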

From source file:com.flipkart.fdp.migration.distcp.state.HDFSStateManager.java

License:Apache License

public Map<String, TransferStatus> getPreviousTransferStatus() throws IOException {

    Map<String, TransferStatus> status = new HashMap<String, TransferStatus>();
    FileStatus[] fstats = null;

    try {
        fstats = fs.listStatus(batchBasePath);
    } catch (Exception e) {
        System.out.println("No Previous states found: " + e.getMessage());
    }

    if (fstats == null || fstats.length <= 0)
        return status;

    List<FileStatus> fstatList = new ArrayList<FileStatus>();

    for (FileStatus fstat : fstats) {
        if (fstat.isDirectory())
            fstatList.add(fstat);
    }

    Collections.sort(fstatList, new Comparator<FileStatus>() {

        @Override
        public int compare(FileStatus o1, FileStatus o2) {
            // descending sort by timestamp; Long.compare avoids the
            // int-overflow risk of casting a long difference to int
            return Long.compare(o2.getModificationTime(), o1.getModificationTime());
        }
    });

    // ignore the current state folder as well.
    if (!fstatList.isEmpty())
        fstatList.remove(0);

    for (FileStatus fstat : fstatList) {
        System.out.println("Processing State History: " + fstat.getPath());

        Path spath = new Path(fstat.getPath(), PREVIOUS_STATE_FILE_NAME);
        List<TransferStatus> stats = getAllStats(new Path(fstat.getPath(), REPORT_PATH));
        mergeStates(status, stats);
        if (fs.exists(spath)) {
            stats = getAllStats(spath);
            mergeStates(status, stats);
            break;
        }
    }
    return status;
}
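
On Java 8 and later, an equivalent overflow-safe descending sort can be written in one line; a sketch assuming the fstatList from the example above:

fstatList.sort(Comparator.comparingLong(FileStatus::getModificationTime).reversed());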

From source file:com.flipkart.fdp.migration.distcp.utils.FileStatsDriver.java

License:Apache License

public List<String> getAllFilePath(Path filePath) throws IOException {
    List<String> fileList = new ArrayList<String>();
    FileStatus[] fileStatus = fs.listStatus(filePath);
    for (FileStatus fileStat : fileStatus) {
        if (fileStat.isDirectory()) {
            fileList.addAll(getAllFilePath(fileStat.getPath()));
        } else {
            long ts = fileStat.getModificationTime();
            if (ts >= startTS && ts <= endTS)
                fileList.add(fileStat.getPath().toUri().getPath() + "," + fileStat.getLen());
        }
    }
    return fileList;
}

From source file:com.flipkart.fdp.migration.distcp.utils.HistoricFileCleanUpDriver.java

License:Apache License

public List<String> getAllFilePath(Path filePath) throws IOException {
    List<String> fileList = new ArrayList<String>();
    FileStatus[] fileStatus = fs.listStatus(filePath);
    for (FileStatus fileStat : fileStatus) {
        if (fileStat.isDirectory()) {
            // compare path strings with equals(); != only tests reference identity
            if (fileStat.getModificationTime() >= startTS && fileStat.getModificationTime() <= endTS
                    && !filePath.toUri().getPath().equals(rootpath))
                fileList.add(fileStat.getPath().toUri().getPath());
            else
                fileList.addAll(getAllFilePath(fileStat.getPath()));
        }
        // } else {
        // if (fileStat.getModificationTime() >= startTS
        // && fileStat.getModificationTime() <= endTS)
        // fileList.add(fileStat.getPath().toUri().getPath());
        // }
    }
    return fileList;
}

From source file:com.floodCtr.Util.java

License:Open Source License

public static LocalResource newYarnAppResource(FileSystem fs, Path path, LocalResourceType type,
        LocalResourceVisibility vis) throws IOException {
    Path qualified = fs.makeQualified(path);
    FileStatus status = fs.getFileStatus(qualified);
    LocalResource resource = Records.newRecord(LocalResource.class);

    resource.setType(type);
    resource.setVisibility(vis);
    resource.setResource(ConverterUtils.getYarnUrlFromPath(qualified));
    resource.setTimestamp(status.getModificationTime());
    resource.setSize(status.getLen());

    return resource;
}
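
YARN uses the recorded size and timestamp to verify, during container localization, that the file on the filesystem still matches what the client registered; a mismatched timestamp typically fails the container launch. A hypothetical usage sketch (the resource name and path are illustrative):

Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();
localResources.put("app.jar", newYarnAppResource(fs, new Path("/apps/demo/app.jar"),
        LocalResourceType.FILE, LocalResourceVisibility.APPLICATION));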

From source file:com.flyhz.avengers.framework.AvengersClient.java

License:Apache License

/**
 * Main run function for the client
 * 
 * @return true if application completed successfully
 * @throws IOException
 * @throws YarnException
 */
private boolean run(String appName, List<String> commands) throws IOException, YarnException {

    LOG.info("Running Client");

    yarnClient.start();

    YarnClusterMetrics clusterMetrics = yarnClient.getYarnClusterMetrics();
    LOG.info("Got Cluster metric info from ASM" + ", numNodeManagers=" + clusterMetrics.getNumNodeManagers());

    List<NodeReport> clusterNodeReports = yarnClient.getNodeReports(NodeState.RUNNING);
    LOG.info("Got Cluster node info from ASM");
    for (NodeReport node : clusterNodeReports) {
        LOG.info("Got node report from ASM for" + ", nodeId=" + node.getNodeId() + ", nodeAddress"
                + node.getHttpAddress() + ", nodeRackName" + node.getRackName() + ", nodeNumContainers"
                + node.getNumContainers());
    }

    QueueInfo queueInfo = yarnClient.getQueueInfo(this.amQueue);
    LOG.info("Queue info" + ", queueName=" + queueInfo.getQueueName() + ", queueCurrentCapacity="
            + queueInfo.getCurrentCapacity() + ", queueMaxCapacity=" + queueInfo.getMaximumCapacity()
            + ", queueApplicationCount=" + queueInfo.getApplications().size() + ", queueChildQueueCount="
            + queueInfo.getChildQueues().size());

    List<QueueUserACLInfo> listAclInfo = yarnClient.getQueueAclsInfo();
    for (QueueUserACLInfo aclInfo : listAclInfo) {
        for (QueueACL userAcl : aclInfo.getUserAcls()) {
            LOG.info("User ACL Info for Queue" + ", queueName=" + aclInfo.getQueueName() + ", userAcl="
                    + userAcl.name());
        }
    }

    // Get a new application id
    YarnClientApplication app = yarnClient.createApplication();
    GetNewApplicationResponse appResponse = app.getNewApplicationResponse();
    // If we do not have min/max, we may not be able to correctly request
    // the required resources from the RM for the app master
    // Memory ask has to be a multiple of min and less than max.
    // Dump out information about cluster capability as seen by the resource
    // manager
    int maxMem = appResponse.getMaximumResourceCapability().getMemory();
    LOG.info("Max mem capabililty of resources in this cluster " + maxMem);

    // A resource ask cannot exceed the max.
    if (amMemory > maxMem) {
        LOG.info("AM memory specified above max threshold of cluster. Using max value." + ", specified="
                + amMemory + ", max=" + maxMem);
        amMemory = maxMem;
    }

    // set the application name
    ApplicationSubmissionContext appContext = app.getApplicationSubmissionContext();
    ApplicationId appId = appContext.getApplicationId();
    appContext.setApplicationName(appName);

    // Set up the container launch context for the application master
    ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class);

    // set local resources for the application master
    // local files or archives as needed
    // In this scenario, the jar file for the application master is part of
    // the local resources
    Map<String, LocalResource> localResources = new HashMap<String, LocalResource>();

    FileSystem fs = DistributedFileSystem.get(conf);
    Path src = new Path(appJar);
    Path dst = new Path(fs.getHomeDirectory(), "avengers/" + batchId + "/avengers.jar");
    if (copy) {
        LOG.info("copy local jar to hdfs");
        fs.copyFromLocalFile(false, true, src, dst);
        copy = false;
    }
    this.hdfsPath = dst.toUri().toString();
    LOG.info("hdfs hdfsPath = {}", dst);
    FileStatus destStatus = fs.getFileStatus(dst);
    LocalResource amJarRsrc = Records.newRecord(LocalResource.class);

    amJarRsrc.setType(LocalResourceType.FILE);
    amJarRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    LOG.info("YarnURLFromPath ->{}", ConverterUtils.getYarnUrlFromPath(dst));
    amJarRsrc.setResource(ConverterUtils.getYarnUrlFromPath(dst));
    amJarRsrc.setTimestamp(destStatus.getModificationTime());
    amJarRsrc.setSize(destStatus.getLen());
    localResources.put("avengers.jar", amJarRsrc);

    // Set the log4j properties if needed
    if (!log4jPropFile.isEmpty()) {
        Path log4jSrc = new Path(log4jPropFile);
        Path log4jDst = new Path(fs.getHomeDirectory(), "log4j.props");
        fs.copyFromLocalFile(false, true, log4jSrc, log4jDst);
        FileStatus log4jFileStatus = fs.getFileStatus(log4jDst);
        LocalResource log4jRsrc = Records.newRecord(LocalResource.class);
        log4jRsrc.setType(LocalResourceType.FILE);
        log4jRsrc.setVisibility(LocalResourceVisibility.APPLICATION);
        log4jRsrc.setResource(ConverterUtils.getYarnUrlFromURI(log4jDst.toUri()));
        log4jRsrc.setTimestamp(log4jFileStatus.getModificationTime());
        log4jRsrc.setSize(log4jFileStatus.getLen());
        localResources.put("log4j.properties", log4jRsrc);
    }

    // The shell script has to be made available on the final container(s)
    // where it will be executed.
    // To do this, we need to first copy into the filesystem that is visible
    // to the yarn framework.
    // We do not need to set this as a local resource for the application
    // master as the application master does not need it.

    // Set local resource info into app master container launch context
    amContainer.setLocalResources(localResources);

    // Set the necessary security tokens as needed
    // amContainer.setContainerTokens(containerToken);

    // Set the env variables to be setup in the env where the application
    // master will be run
    LOG.info("Set the environment for the application master");
    Map<String, String> env = new HashMap<String, String>();
    StringBuilder classPathEnv = new StringBuilder(Environment.CLASSPATH.$()).append(File.pathSeparatorChar);
    for (String c : conf.getStrings(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
            YarnConfiguration.DEFAULT_YARN_APPLICATION_CLASSPATH)) {
        classPathEnv.append(File.pathSeparatorChar);
        classPathEnv.append(c.trim());
    }
    classPathEnv.append(File.pathSeparatorChar).append("./log4j.properties");

    // add the runtime classpath needed for tests to work
    if (conf.getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
        classPathEnv.append(':');
        classPathEnv.append(System.getProperty("java.class.path"));
    }
    LOG.info("CLASSPATH -> " + classPathEnv);
    env.put("CLASSPATH", classPathEnv.toString());

    amContainer.setEnvironment(env);

    for (String cmd : commands) {
        LOG.info("run command {},appId {}", cmd, appId.getId());
    }

    amContainer.setCommands(commands);

    // Set up resource type requirements
    // For now, only memory is supported so we set memory requirements
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(amMemory);
    appContext.setResource(capability);

    // Service data is a binary blob that can be passed to the application
    // Not needed in this scenario
    // amContainer.setServiceData(serviceData);

    // Setup security tokens
    if (UserGroupInformation.isSecurityEnabled()) {
        Credentials credentials = new Credentials();
        String tokenRenewer = conf.get(YarnConfiguration.RM_PRINCIPAL);
        if (tokenRenewer == null || tokenRenewer.length() == 0) {
            throw new IOException("Can't get Master Kerberos principal for the RM to use as renewer");
        }

        // For now, only getting tokens for the default file-system.
        final Token<?> tokens[] = fs.addDelegationTokens(tokenRenewer, credentials);
        if (tokens != null) {
            for (Token<?> token : tokens) {
                LOG.info("Got dt for " + fs.getUri() + "; " + token);
            }
        }
        DataOutputBuffer dob = new DataOutputBuffer();
        credentials.writeTokenStorageToStream(dob);
        ByteBuffer fsTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
        amContainer.setTokens(fsTokens);
    }

    appContext.setAMContainerSpec(amContainer);

    // Set the priority for the application master
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(amPriority);
    appContext.setPriority(pri);

    // Set the queue to which this application is to be submitted in the RM
    appContext.setQueue(amQueue);

    // Submit the application to the applications manager
    // SubmitApplicationResponse submitResp =
    // applicationsManager.submitApplication(appRequest);
    // Ignore the response as either a valid response object is returned on
    // success
    // or an exception thrown to denote some form of a failure
    LOG.info("Submitting application to ASM");

    yarnClient.submitApplication(appContext);

    // Try submitting the same request again
    // app submission failure?

    // Monitor the application
    return monitorApplication(appId);

}

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer.java

License:Apache License

/**
 * @param ts
 *          target timestamp
 * @return list of hoplogs, whose expiry markers were created before target
 *         timestamp, and the expiry marker itself.
 * @throws IOException
 */
protected List<FileStatus> getOptimizationTargets(long ts) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug("{}Identifying optimization targets " + ts, logPrefix);
    }

    List<FileStatus> deleteTargets = new ArrayList<FileStatus>();
    FileStatus[] markers = getExpiryMarkers();
    if (markers != null) {
        for (FileStatus marker : markers) {
            String name = truncateExpiryExtension(marker.getPath().getName());
            long timestamp = marker.getModificationTime();

            // expired minor compacted files are not being used anywhere. These can
            // be removed immediately. All the other expired files should be removed
            // when the files have aged
            boolean isTarget = false;

            if (name.endsWith(MINOR_HOPLOG_EXTENSION)) {
                isTarget = true;
            } else if (timestamp < ts && name.endsWith(FLUSH_HOPLOG_EXTENSION)) {
                isTarget = true;
            } else if (timestamp < ts && name.endsWith(MAJOR_HOPLOG_EXTENSION)) {
                long majorCInterval = ((long) store.getMajorCompactionInterval()) * 60 * 1000;
                if (timestamp < (System.currentTimeMillis() - majorCInterval)) {
                    isTarget = true;
                }
            }
            if (!isTarget) {
                continue;
            }

            // if the file is still being read, do not delete or rename it
            TrackedReference<Hoplog> used = hoplogReadersController.getInactiveHoplog(name);
            if (used != null) {
                if (used.inUse() && logger.isDebugEnabled()) {
                    logger.debug("{}Optimizer: found active expired hoplog:" + name, logPrefix);
                } else if (logger.isDebugEnabled()) {
                    logger.debug("{}Optimizer: found open expired hoplog:" + name, logPrefix);
                }
                continue;
            }

            if (logger.isDebugEnabled()) {
                logger.debug("{}Delete target identified " + marker.getPath(), logPrefix);
            }

            deleteTargets.add(marker);
            Path hoplogPath = new Path(bucketPath, name);
            if (store.getFileSystem().exists(hoplogPath)) {
                FileStatus hoplog = store.getFileSystem().getFileStatus(hoplogPath);
                deleteTargets.add(hoplog);
            }
        }
    }
    return deleteTargets;
}
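
A note on the interval arithmetic above: the code evidently treats store.getMajorCompactionInterval() as a value in minutes, multiplying by 60 * 1000 to obtain milliseconds before comparing against System.currentTimeMillis(); the cast to long prevents int overflow for large intervals.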

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer.java

License:Apache License

private void cleanupTmpFiles() throws IOException {
    if (oldTmpFiles == null && tmpFiles == null) {
        return;
    }

    if (oldTmpFiles != null) {
        FileSystem fs = store.getFileSystem();
        long now = System.currentTimeMillis();
        for (Iterator<FileStatus> itr = oldTmpFiles.iterator(); itr.hasNext();) {
            FileStatus file = itr.next();
            if (file.getModificationTime() + TMP_FILE_EXPIRATION_TIME_MS > now) {
                if (logger.isDebugEnabled()) {
                    logger.debug("{}Deleting temporary file:" + file.getPath(), logPrefix);
                }
                fs.delete(file.getPath(), false);
                itr.remove();
            }
        }
    }
    if (tmpFiles != null) {
        for (Hoplog so : tmpFiles.keySet()) {
            if (logger.isDebugEnabled()) {
                logger.debug("{}Deleting temporary file:" + so.getFileName(), logPrefix);
            }
            deleteTmpFile(null, so);
        }
    }
}

From source file:com.gemstone.gemfire.cache.hdfs.internal.hoplog.mapreduce.HoplogUtil.java

License:Apache License

private static Map<String, Long> getExpiredHoplogs(FileSystem fs, FileStatus[] bucketFiles,
        Pattern expiredPattern) throws IOException {
    Map<String, Long> expiredHoplogs = new HashMap<String, Long>();

    for (FileStatus file : bucketFiles) {
        if (!file.isFile()) {
            continue;
        }
        String fileName = file.getPath().getName();
        Matcher match = expiredPattern.matcher(fileName);
        if (!match.matches()) {
            continue;
        }
        expiredHoplogs.put(fileName, file.getModificationTime());
    }
    return expiredHoplogs;
}

From source file:com.github.hdl.tensorflow.yarn.app.TFAmContainer.java

License:Apache License

public void addToLocalResources(FileSystem fs, Path dst, String fileDstPath,
        Map<String, LocalResource> localResources) throws IOException {
    FileStatus scFileStatus = fs.getFileStatus(dst);
    LocalResource scRsrc = LocalResource.newInstance(URL.fromURI(dst.toUri()), LocalResourceType.FILE,
            LocalResourceVisibility.APPLICATION, scFileStatus.getLen(), scFileStatus.getModificationTime());
    localResources.put(fileDstPath, scRsrc);
}
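
The LocalResource.newInstance factory used here is a convenience available in newer Hadoop releases; it is equivalent to the Records.newRecord(LocalResource.class)-plus-setters pattern in the earlier examples, with the file length passed as the size argument and getModificationTime() as the timestamp.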