Example usage for org.apache.hadoop.fs FileSystem isFile

List of usage examples for org.apache.hadoop.fs FileSystem isFile

Introduction

On this page you can find usage examples for org.apache.hadoop.fs.FileSystem#isFile.

Prototype

@Deprecated
public boolean isFile(Path f) throws IOException 

Document

Returns true iff the named path is a regular file.
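
Note that FileSystem#isFile(Path) is deprecated in current Hadoop releases; the recommended replacement is to fetch the path's FileStatus and test it. A minimal, self-contained sketch of that pattern, assuming the target path is passed on the command line:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileCheck {
    public static void main(String[] args) throws IOException {
        Path path = new Path(args[0]);
        FileSystem fs = path.getFileSystem(new Configuration());
        // Preferred over the deprecated fs.isFile(path): test the FileStatus.
        // The exists() guard avoids the FileNotFoundException that
        // getFileStatus() throws for a missing path.
        boolean isRegularFile = fs.exists(path) && fs.getFileStatus(path).isFile();
        System.out.println(path + " is a regular file: " + isRegularFile);
    }
}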

Usage

From source file:org.apache.blur.manager.writer.IndexImporter.java

License:Apache License

private Map<Path, Path> toMap(FileSystem fileSystem, Set<Path> inuseDirs) throws IOException {
    Map<Path, Path> result = new TreeMap<Path, Path>();
    for (Path p : inuseDirs) {
        if (!fileSystem.isFile(p)) {
            FileStatus[] listStatus = fileSystem.listStatus(p);
            for (FileStatus status : listStatus) {
                result.put(status.getPath(), p);
            }
        }
    }
    return result;
}

From source file:org.apache.blur.mapreduce.lib.BlurOutputCommitter.java

License:Apache License

@Override
public void commitTask(TaskAttemptContext context) throws IOException {
    LOG.info("Running commit task.");
    Conf conf = setup(context);
    FileSystem fileSystem = conf._newIndex.getFileSystem(conf._configuration);
    if (fileSystem.exists(conf._newIndex) && !fileSystem.isFile(conf._newIndex)) {
        Path dst = new Path(conf._indexPath, conf._taskAttemptID.toString() + ".task_complete");
        LOG.info("Committing [{0}] to [{1}]", conf._newIndex, dst);
        fileSystem.rename(conf._newIndex, dst);
    } else {
        throw new IOException("Path [" + conf._newIndex + "] does not exist or is not a directory, cannot commit.");
    }
}

From source file:org.apache.blur.mapreduce.lib.BlurOutputFormatTest.java

License:Apache License

private void dump(Path path, Configuration conf) throws IOException {
    FileSystem fileSystem = path.getFileSystem(conf);
    System.out.println(path);
    if (!fileSystem.isFile(path)) {
        FileStatus[] listStatus = fileSystem.listStatus(path);
        for (FileStatus fileStatus : listStatus) {
            dump(fileStatus.getPath(), conf);
        }
    }
}
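
For comparison, a recursive walk can also be written without isFile at all: FileSystem#listFiles(path, true) returns a RemoteIterator that descends into subdirectories and yields only regular files (so, unlike dump above, directory paths themselves are not printed). A minimal sketch, assuming org.apache.hadoop.fs.RemoteIterator and org.apache.hadoop.fs.LocatedFileStatus are imported; the dumpFiles name is ours:

private void dumpFiles(Path path, Configuration conf) throws IOException {
    FileSystem fileSystem = path.getFileSystem(conf);
    // Recursive listing; yields a LocatedFileStatus for each regular file.
    RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(path, true);
    while (files.hasNext()) {
        System.out.println(files.next().getPath());
    }
}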

From source file:org.apache.carbondata.core.datastorage.store.impl.FileFactory.java

License:Apache License

/**
 * Checks whether the given path exists and, when performFileCheck is
 * true, additionally checks that it is a regular file rather than a
 * directory.
 *
 * @param filePath         - Path
 * @param fileType         - FileType Local/HDFS
 * @param performFileCheck - Provide false for folders, true for files
 */
public static boolean isFileExist(String filePath, FileType fileType, boolean performFileCheck)
        throws IOException {
    filePath = filePath.replace("\\", "/");
    switch (fileType) {
    case HDFS:
    case VIEWFS:
        Path path = new Path(filePath);
        FileSystem fs = path.getFileSystem(configuration);
        if (performFileCheck) {
            return fs.exists(path) && fs.isFile(path);
        } else {
            return fs.exists(path);
        }

    case LOCAL:
    default:
        File defaultFile = new File(filePath);

        if (performFileCheck) {
            return defaultFile.exists() && defaultFile.isFile();
        } else {
            return defaultFile.exists();
        }
    }
}
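
A hypothetical call site (the path literal is only illustrative) showing how performFileCheck changes the result for a path that exists but is a directory:

// For an existing directory such as /data/segments:
boolean exists = FileFactory.isFileExist("/data/segments", FileFactory.FileType.HDFS, false); // true: path exists
boolean isFile = FileFactory.isFileExist("/data/segments", FileFactory.FileType.HDFS, true);  // false: not a regular file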

From source file:org.apache.carbondata.core.datastore.impl.FileFactory.java

License:Apache License

/**
 * Checks whether the given path exists and, when performFileCheck is
 * true, additionally checks that it is a regular file rather than a
 * directory.
 *
 * @param filePath         - Path
 * @param fileType         - FileType Local/HDFS
 * @param performFileCheck - Provide false for folders, true for files
 */
public static boolean isFileExist(String filePath, FileType fileType, boolean performFileCheck)
        throws IOException {
    filePath = filePath.replace("\\", "/");
    switch (fileType) {
    case HDFS:
    case ALLUXIO:
    case VIEWFS:
        Path path = new Path(filePath);
        FileSystem fs = path.getFileSystem(configuration);
        if (performFileCheck) {
            return fs.exists(path) && fs.isFile(path);
        } else {
            return fs.exists(path);
        }

    case LOCAL:
    default:
        filePath = getUpdatedFilePath(filePath, fileType);
        File defaultFile = new File(filePath);

        if (performFileCheck) {
            return defaultFile.exists() && defaultFile.isFile();
        } else {
            return defaultFile.exists();
        }
    }
}

From source file:org.apache.crunch.impl.spark.SparkRuntime.java

License:Apache License

private void distributeFiles() {
    try {
        URI[] uris = DistributedCache.getCacheFiles(conf);
        if (uris != null) {
            URI[] outURIs = new URI[uris.length];
            for (int i = 0; i < uris.length; i++) {
                Path path = new Path(uris[i]);
                FileSystem fs = path.getFileSystem(conf);
                if (fs.isFile(path)) {
                    outURIs[i] = uris[i];
                } else {
                    Path mergePath = new Path(path.getParent(), "sparkreadable-" + path.getName());
                    FileUtil.copyMerge(fs, path, fs, mergePath, false, conf, "");
                    outURIs[i] = mergePath.toUri();
                }
                sparkContext.addFile(outURIs[i].toString());
            }
            DistributedCache.setCacheFiles(outURIs, conf);
        }
    } catch (IOException e) {
        throw new RuntimeException("Error retrieving cache files", e);
    }
}

From source file:org.apache.crunch.io.orc.OrcFileReaderFactory.java

License:Apache License

@Override
public Iterator<T> read(FileSystem fs, final Path path) {
    try {
        if (!fs.isFile(path)) {
            throw new CrunchRuntimeException("Not a file: " + path);
        }

        inputFn.initialize();

        FileStatus status = fs.getFileStatus(path);
        FileSplit split = new FileSplit(path, 0, status.getLen(), new String[0]);

        JobConf conf = new JobConf();
        if (readColumns != null) {
            conf.setBoolean(OrcFileSource.HIVE_READ_ALL_COLUMNS, false);
            conf.set(ColumnProjectionUtils.READ_COLUMN_IDS_CONF_STR,
                    OrcFileSource.getColumnIdsStr(readColumns));
        }
        final RecordReader<NullWritable, OrcStruct> reader = inputFormat.getRecordReader(split, conf,
                Reporter.NULL);

        return new UnmodifiableIterator<T>() {

            private boolean checked = false;
            private boolean hasNext;
            private OrcStruct value;
            private OrcWritable writable = new OrcWritable();

            @Override
            public boolean hasNext() {
                try {
                    if (value == null) {
                        value = reader.createValue();
                    }
                    if (!checked) {
                        hasNext = reader.next(NullWritable.get(), value);
                        checked = true;
                    }
                    return hasNext;
                } catch (Exception e) {
                    throw new CrunchRuntimeException("Error while reading local file: " + path, e);
                }
            }

            @Override
            public T next() {
                try {
                    if (value == null) {
                        value = reader.createValue();
                    }
                    if (!checked) {
                        reader.next(NullWritable.get(), value);
                    }
                    checked = false;
                    writable.set(value);
                    return inputFn.map(writable);
                } catch (Exception e) {
                    throw new CrunchRuntimeException("Error while reading local file: " + path, e);
                }
            }

        };
    } catch (Exception e) {
        throw new CrunchRuntimeException("Error while reading local file: " + path, e);
    }
}

From source file:org.apache.crunch.kafka.offset.hdfs.HDFSOffsetReader.java

License:Apache License

@Override
public Map<TopicPartition, Long> readOffsets(long persistedOffsetTime) throws IOException {
    Path offsetFilePath = HDFSOffsetWriter.getPersistedTimeStoragePath(baseOffsetStoragePath,
            persistedOffsetTime);

    FileSystem fs = getFileSystem();
    if (fs.isFile(offsetFilePath)) {
        InputStream inputStream = fs.open(offsetFilePath);
        try {
            Offsets offsets = MAPPER.readValue(inputStream, Offsets.class);
            Map<TopicPartition, Long> partitionsMap = new HashMap<>();
            for (Offsets.PartitionOffset partitionOffset : offsets.getOffsets()) {
                partitionsMap.put(
                        new TopicPartition(partitionOffset.getTopic(), partitionOffset.getPartition()),
                        partitionOffset.getOffset());
            }
            return partitionsMap;
        } finally {
            inputStream.close();
        }
    }

    LOG.error("Offset file at {} is not a file or does not exist.", offsetFilePath);
    return null;
}

From source file:org.apache.falcon.converter.OozieProcessMapper.java

License:Apache License

private Path getUserWorkflowPath(Cluster cluster, Path bundlePath) throws FalconException {
    try {
        FileSystem fs = FileSystem.get(ClusterHelper.getConfiguration(cluster));
        Process process = getEntity();
        Path wfPath = new Path(process.getWorkflow().getPath());
        if (fs.isFile(wfPath)) {
            return new Path(bundlePath, EntityUtil.PROCESS_USER_DIR + "/" + wfPath.getName());
        } else {
            return new Path(bundlePath, EntityUtil.PROCESS_USER_DIR);
        }
    } catch (IOException e) {
        throw new FalconException("Failed to get workflow path", e);
    }
}

From source file:org.apache.falcon.converter.OozieProcessMapper.java

License:Apache License

private Path getUserLibPath(Cluster cluster, Path bundlePath) throws FalconException {
    try {
        FileSystem fs = FileSystem.get(ClusterHelper.getConfiguration(cluster));
        Process process = getEntity();
        if (process.getWorkflow().getLib() == null) {
            return null;
        }
        Path libPath = new Path(process.getWorkflow().getLib());
        if (fs.isFile(libPath)) {
            return new Path(bundlePath, EntityUtil.PROCESS_USERLIB_DIR + "/" + libPath.getName());
        } else {
            return new Path(bundlePath, EntityUtil.PROCESS_USERLIB_DIR);
        }
    } catch (IOException e) {
        throw new FalconException("Failed to get user lib path", e);
    }
}