Example usage for org.apache.hadoop.fs FileSystem isFile

Introduction

On this page you can find example usage of org.apache.hadoop.fs FileSystem isFile.

Prototype

@Deprecated
public boolean isFile(Path f) throws IOException 

Document

True iff the named path is a regular file.
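
The method is deprecated in FileSystem; the Hadoop javadoc advises reusing the FileStatus returned by getFileStatus() or listStatus() instead, which saves an extra round trip to the filesystem when you already need the file's status. A minimal sketch of both styles (the path below is illustrative):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileExample {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/example.txt"); // illustrative path

        // Deprecated convenience call; returns false if the path does not exist.
        boolean viaIsFile = fs.isFile(p);

        // Preferred replacement: fetch the status once and reuse it.
        // getFileStatus() throws FileNotFoundException for a missing path,
        // so guard with exists() or catch the exception.
        boolean viaStatus = fs.exists(p) && fs.getFileStatus(p).isFile();

        System.out.println(viaIsFile + " " + viaStatus);
    }
}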

Usage

From source file:com.netflix.bdp.s3.S3PartitionedOutputCommitter.java

License:Apache License

protected Set<String> getPartitions(FileSystem attemptFS, Path attemptPath, List<FileStatus> taskOutput)
        throws IOException {
    // get a list of partition directories
    Set<String> partitions = Sets.newLinkedHashSet();
    for (FileStatus stat : taskOutput) {
        // sanity check the output paths
        Path outputFile = stat.getPath();
        if (!attemptFS.isFile(outputFile)) {
            throw new RuntimeException("Task output entry is not a file: " + outputFile);
        }
        String partition = getPartition(Paths.getRelativePath(attemptPath, outputFile));
        if (partition != null) {
            partitions.add(partition);
        } else {
            partitions.add(TABLE_ROOT);
        }
    }

    return partitions;
}

From source file:com.netflix.bdp.s3mper.listing.ConsistentListingAspect.java

License:Apache License

private List<Path> recursiveList(FileSystem fs, Path path) throws IOException {
    List<Path> result = new ArrayList<Path>();

    try {
        result.add(path);

        if (!fs.isFile(path)) {
            FileStatus[] children = fs.listStatus(path);

            if (children == null) {
                return result;
            }

            for (FileStatus child : children) {
                if (child.isDir()) {
                    result.addAll(recursiveList(fs, child.getPath()));
                } else {
                    result.add(child.getPath());
                }
            }
        }
    } catch (Exception e) {
        log.info("A problem occurred recursively deleting path: " + path + " " + e.getMessage());
    }

    return result;
}

From source file:com.practicalHadoop.outputformat.MultpleDirectories.FileOutputCommitter.java

License:Apache License

/**
 * Move all of the files from the work directory to the final output
 * @param context the task context
 * @param fs the output file system
 * @param jobOutputDir the final output directory
 * @param taskOutput the work path
 * @throws IOException
 */
private void moveTaskOutputs(TaskAttemptContext context, FileSystem fs, Path jobOutputDir, Path taskOutput)
        throws IOException {
    TaskAttemptID attemptId = context.getTaskAttemptID();
    context.progress();
    if (fs.isFile(taskOutput)) {
        Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, workPath);
        if (!fs.rename(taskOutput, finalOutputPath)) {
            if (!fs.delete(finalOutputPath, true)) {
                throw new IOException("Failed to delete earlier output of task: " + attemptId);
            }
            if (!fs.rename(taskOutput, finalOutputPath)) {
                throw new IOException("Failed to save output of task: " + attemptId);
            }
        }
        LOG.debug("Moved " + taskOutput + " to " + finalOutputPath);
    } else if (fs.getFileStatus(taskOutput).isDir()) {
        FileStatus[] paths = fs.listStatus(taskOutput);
        Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, workPath);
        fs.mkdirs(finalOutputPath);
        if (paths != null) {
            for (FileStatus path : paths) {
                moveTaskOutputs(context, fs, jobOutputDir, path.getPath());
            }
        }
    }
}

From source file:com.redsqirl.workflow.server.Workflow.java

License:Open Source License

/**
 * Reads the xml part of a workflow.
 * 
 * @param filePath
 *            the xml file path to read from.
 * @return null if OK, or a description of the error.
 */
public String read(String filePath) {
    String error = null;

    try {
        String[] path = filePath.split("/");
        String fileName = path[path.length - 1];
        String tempPath = WorkflowPrefManager.getPathtmpfolder() + "/" + fileName + "_"
                + RandomString.getRandomName(4);
        FileSystem fs = NameNodeVar.getFS();
        if (!fs.isFile(new Path(filePath))) {
            return "'" + filePath + "' is not a file.";
        }
        logger.debug("filePath  " + filePath);
        logger.debug("tempPath  " + tempPath);

        fs.copyToLocalFile(new Path(filePath), new Path(tempPath));

        File xmlFile = new File(tempPath);
        error = readFromLocal(xmlFile);

        // clean temporary files
        xmlFile.delete();

        if (filePath.startsWith(WorkflowPrefManager.getBackupPath())) {
            saved = false;
            this.path = null;
        } else {
            this.path = filePath;
        }
    } catch (Exception e) {
        error = LanguageManagerWF.getText("workflow.read_failXml");
        logger.error(error, e);

    }

    return error;
}

From source file:com.soteradefense.dga.LouvainRunner.java

License:Apache License

private boolean isComplete(String path) throws IOException {
    FileSystem fs = FileSystem.get(configuration);
    Path completeFile = new Path(path + "_COMPLETE");
    return fs.isFile(completeFile);
}

From source file:com.toddbodnar.simpleHive.IO.hdfsFile.java

@Override
public void resetStream() {

    try {
        if (out != null)
            out.close();
        writing = false;
        if (in != null)
            in.close();
        FileSystem fs = FileSystem.get(GetConfiguration.get());

        // a single file becomes a one-element iterator; otherwise the
        // directory tree is listed recursively
        if (fs.isFile(location)) {
            LinkedList<FileStatus> file = new LinkedList<>();
            file.add(fs.getFileStatus(location));
            theFiles = file.iterator();
        } else {
            LinkedList<FileStatus> files = new LinkedList<>();
            RemoteIterator<LocatedFileStatus> fileremote = fs.listFiles(location, true);
            while (fileremote.hasNext())
                files.add(fileremote.next());
            theFiles = files.iterator();
        }

        FileStatus nextFileStatus;
        do {
            if (!theFiles.hasNext()) {
                System.err.println("WARNING: File is Empty");
                super.next = null;
                return;
            }
            nextFileStatus = theFiles.next();
        } while (fs.isDirectory(nextFileStatus.getPath()) || nextFileStatus.getLen() == 0);

        in = new BufferedReader(new InputStreamReader(fs.open(nextFileStatus.getPath())));
        next = in.readLine();

        //out.flush();
    } catch (FileNotFoundException ex) {
        Logger.getLogger(fileFile.class.getName()).log(Level.SEVERE, null, ex);
    } catch (IOException ex) {
        Logger.getLogger(fileFile.class.getName()).log(Level.SEVERE, null, ex);
    }
}

From source file:com.vf.flume.sink.hdfs.HDFSDataStream.java

License:Apache License

protected void doOpen(Configuration conf, Path dstPath, FileSystem hdfs) throws IOException {
    if (useRawLocalFileSystem) {
        if (hdfs instanceof LocalFileSystem) {
            hdfs = ((LocalFileSystem) hdfs).getRaw();
        } else {
            logger.warn("useRawLocalFileSystem is set to true but file system "
                    + "is not of type LocalFileSystem: " + hdfs.getClass().getName());
        }
    }

    boolean appending = false;
    // the hdfs.append.support check was commented out upstream, so the stream
    // is appended whenever the destination already exists as a file
    if (hdfs.isFile(dstPath)) {
        outStream = hdfs.append(dstPath);
        appending = true;
    } else {
        outStream = hdfs.create(dstPath);
    }

    serializer = EventSerializerFactory.getInstance(serializerType, serializerContext, outStream);
    if (appending && !serializer.supportsReopen()) {
        outStream.close();
        serializer = null;
        throw new IOException("serializer (" + serializerType + ") does not support append");
    }

    // must call superclass to check for replication issues
    registerCurrentStream(outStream, hdfs, dstPath);

    if (appending) {
        serializer.afterReopen();
    } else {
        serializer.afterCreate();
    }
}

From source file:cz.muni.fi.xfabian7.bp.mgrid.HdfsStorageBucket.java

/**
 * Return an OutputStream over the path using FileSystem
 *
 * @param fs FileSystem
 * @return OutputStream
 * @throws IOException
 */
public OutputStream openOutputStream(FileSystem fs, Path path) throws IOException {
    if (!fs.isFile(path)) {
        createFile(path);
    }
    return fs.append(path);
}

From source file:cz.muni.fi.xfabian7.bp.mgrid.HdfsStorageBucket.java

/**
 * Read serialID and objectCount from meta file
 *
 * @throws IOException
 */
public void readMetaFile() throws IOException {

    FileSystem fs = getFileSystem();
    Path metaFilePath = new Path(path + ".meta");

    System.out.println("HdfsStorageBucket readMetaFile:" + path);

    if (fs.isFile(metaFilePath)) {

        InputStream in = openInputStream(metaFilePath);
        BufferedReader br = new BufferedReader(new InputStreamReader(in));

        setSize(Integer.valueOf(br.readLine()));
        in.close();
    } else {
        createMetaFile();
    }
}

From source file:edu.arizona.cs.hadoop.fs.irods.output.HirodsFileOutputCommitter.java

License:Apache License

private void moveTaskOutputsToIRODS(TaskAttemptContext context, FileSystem outfs, Path outDir,
        FileSystem workfs, Path workOutput) throws IOException {
    context.progress();
    if (workfs.isFile(workOutput)) {
        Path finalOutputPath = getFinalPath(outDir, workOutput, this.workPath);
        FSDataOutputStream irods_os = null;
        FSDataInputStream temp_is = null;
        try {
            // commit to iRODS
            irods_os = outfs.create(finalOutputPath, true);
            temp_is = workfs.open(workOutput);

            byte[] buffer = new byte[100 * 1024];
            int bytes_read = 0;

            while ((bytes_read = temp_is.read(buffer)) != -1) {
                irods_os.write(buffer, 0, bytes_read);
            }
        } finally {
            if (temp_is != null) {
                try {
                    temp_is.close();
                } catch (IOException ex) {
                    // ignore exceptions
                }
            }

            // remove temporary file
            try {
                workfs.delete(workOutput, true);
            } catch (IOException ex) {
                // ignore exceptions
            }

            if (irods_os != null) {
                irods_os.close();
            }
        }

        LOG.debug("Moved " + workOutput + " to " + finalOutputPath);
    } else if (workfs.getFileStatus(workOutput).isDir()) {
        FileStatus[] paths = workfs.listStatus(workOutput);
        Path finalOutputPath = getFinalPath(outDir, workOutput, this.workPath);
        outfs.mkdirs(finalOutputPath);
        if (paths != null) {
            for (FileStatus path : paths) {
                moveTaskOutputsToIRODS(context, outfs, outDir, workfs, path.getPath());
            }
        }
    }
}