Example usage for org.apache.hadoop.hdfs DistributedFileSystem isDirectory

List of usage examples for org.apache.hadoop.hdfs DistributedFileSystem isDirectory

Introduction

On this page you can find example usage for org.apache.hadoop.hdfs DistributedFileSystem isDirectory.

Prototype

@Deprecated
public boolean isDirectory(Path f) throws IOException 

Document

True iff the named path is a directory.
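
FileSystem.isDirectory(Path) is deprecated in favor of reusing a FileStatus obtained from getFileStatus() or listStatus(). A minimal sketch of both forms (the path /tmp/data is a placeholder):

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsDirectoryExample {
    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration());
        Path p = new Path("/tmp/data");

        // Deprecated convenience form.
        boolean isDir = fs.isDirectory(p);

        // Preferred form: one getFileStatus() call whose FileStatus can be
        // reused for other attributes (length, owner, ...).
        try {
            isDir = fs.getFileStatus(p).isDirectory();
        } catch (FileNotFoundException e) {
            isDir = false; // isDirectory() also returns false for missing paths
        }
        System.out.println(p + " is a directory: " + isDir);
    }
}

Note that the deprecated convenience method returns false when the path does not exist, which the try/catch above reproduces.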

Usage

From source file: com.cloudera.impala.analysis.LoadDataStmt.java

License: Apache License

private void analyzePaths(Analyzer analyzer, HdfsTable hdfsTable) throws AnalysisException {
    // The user must have permission to access the source location. Since the files will
    // be moved from this location, the user needs to have all permission.
    sourceDataPath_.analyze(analyzer, Privilege.ALL);

    try {
        Path source = sourceDataPath_.getPath();
        FileSystem fs = source.getFileSystem(FileSystemUtil.getConfiguration());
        // sourceDataPath_.analyze() ensured that path is on an HDFS filesystem.
        Preconditions.checkState(fs instanceof DistributedFileSystem);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        if (!dfs.exists(source)) {
            throw new AnalysisException(String.format("INPATH location '%s' does not exist.", sourceDataPath_));
        }

        if (dfs.isDirectory(source)) {
            if (FileSystemUtil.getTotalNumVisibleFiles(source) == 0) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' contains no visible files.", sourceDataPath_));
            }
            if (FileSystemUtil.containsSubdirectory(source)) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' cannot contain subdirectories.", sourceDataPath_));
            }
        } else { // INPATH points to a file.
            if (FileSystemUtil.isHiddenFile(source.getName())) {
                throw new AnalysisException(
                        String.format("INPATH location '%s' points to a hidden file.", source));
            }
        }

        String noWriteAccessErrorMsg = String.format(
                "Unable to LOAD DATA into "
                        + "target table (%s) because Impala does not have WRITE access to HDFS " + "location: ",
                hdfsTable.getFullName());

        HdfsPartition partition;
        String location;
        if (partitionSpec_ != null) {
            partition = hdfsTable.getPartition(partitionSpec_.getPartitionSpecKeyValues());
            location = partition.getLocation();
            if (!TAccessLevelUtil.impliesWriteAccess(partition.getAccessLevel())) {
                throw new AnalysisException(noWriteAccessErrorMsg + partition.getLocation());
            }
        } else {
            // "default" partition
            partition = hdfsTable.getPartitions().get(0);
            location = hdfsTable.getLocation();
            if (!hdfsTable.hasWriteAccess()) {
                throw new AnalysisException(noWriteAccessErrorMsg + hdfsTable.getLocation());
            }
        }
        Preconditions.checkNotNull(partition);

        // Until Frontend.loadTableData() can handle cross-filesystem and filesystems
        // that aren't HDFS, require that source and dest are on the same HDFS.
        if (!FileSystemUtil.isPathOnFileSystem(new Path(location), fs)) {
            throw new AnalysisException(String.format(
                    "Unable to LOAD DATA into target table (%s) because source path (%s) and "
                            + "destination %s (%s) are on different file-systems.",
                    hdfsTable.getFullName(), source, partitionSpec_ == null ? "table" : "partition",
                    partition.getLocation()));
        }
        // Verify the files being loaded are supported.
        for (FileStatus fStatus : fs.listStatus(source)) {
            if (fs.isDirectory(fStatus.getPath())) {
                continue;
            }
            StringBuilder errorMsg = new StringBuilder();
            HdfsFileFormat fileFormat = partition.getInputFormatDescriptor().getFileFormat();
            if (!fileFormat.isFileCompressionTypeSupported(fStatus.getPath().toString(), errorMsg)) {
                throw new AnalysisException(errorMsg.toString());
            }
        }
    } catch (FileNotFoundException e) {
        throw new AnalysisException("File not found: " + e.getMessage(), e);
    } catch (IOException e) {
        throw new AnalysisException("Error accessing file system: " + e.getMessage(), e);
    }
}
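
The exists()/isDirectory() pair above costs two NameNode round trips and leaves a small window in which the path can disappear between the calls. A sketch of collapsing both checks into a single getFileStatus() call; statSource is a hypothetical helper, not part of Impala:

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class SourcePathCheck {
    // One RPC instead of exists() + isDirectory(), with no window for the
    // path to vanish between the two checks.
    static FileStatus statSource(FileSystem fs, Path source) throws IOException {
        try {
            return fs.getFileStatus(source);
        } catch (FileNotFoundException e) {
            throw new FileNotFoundException(
                    String.format("INPATH location '%s' does not exist.", source));
        }
    }
}

A caller would then branch on statSource(fs, source).isDirectory() exactly as the example does.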

From source file: com.cloudera.impala.service.Frontend.java

License: Apache License

/**
 * Loads a table or partition with one or more data files. If the "overwrite" flag
 * in the request is true, all existing data in the table/partition will be replaced.
 * If the "overwrite" flag is false, the files will be added alongside any existing
 * data files.
 */
public TLoadDataResp loadTableData(TLoadDataReq request) throws ImpalaException, IOException {
    TableName tableName = TableName.fromThrift(request.getTable_name());

    // Get the destination for the load. If the load is targeting a partition,
    // this is the partition location. Otherwise this is the table location.
    String destPathString = null;
    if (request.isSetPartition_spec()) {
        destPathString = impaladCatalog_
                .getHdfsPartition(tableName.getDb(), tableName.getTbl(), request.getPartition_spec())
                .getLocation();
    } else {
        destPathString = impaladCatalog_.getTable(tableName.getDb(), tableName.getTbl()).getMetaStoreTable()
                .getSd().getLocation();
    }

    Path destPath = new Path(destPathString);
    DistributedFileSystem dfs = FileSystemUtil.getDistributedFileSystem(destPath);

    // Create a temporary directory within the final destination directory to stage the
    // file move.
    Path tmpDestPath = FileSystemUtil.makeTmpSubdirectory(destPath);

    Path sourcePath = new Path(request.source_path);
    int filesLoaded = 0;
    if (dfs.isDirectory(sourcePath)) {
        filesLoaded = FileSystemUtil.moveAllVisibleFiles(sourcePath, tmpDestPath);
    } else {
        FileSystemUtil.moveFile(sourcePath, tmpDestPath, true);
        filesLoaded = 1;
    }

    // If this is an OVERWRITE, delete all files in the destination.
    if (request.isOverwrite()) {
        FileSystemUtil.deleteAllVisibleFiles(destPath);
    }

    // Move the files from the temporary location to the final destination.
    FileSystemUtil.moveAllVisibleFiles(tmpDestPath, destPath);
    // Cleanup the tmp directory.
    dfs.delete(tmpDestPath, true);
    TLoadDataResp response = new TLoadDataResp();
    TColumnValue col = new TColumnValue();
    String loadMsg = String.format("Loaded %d file(s). Total files in destination location: %d", filesLoaded,
            FileSystemUtil.getTotalNumVisibleFiles(destPath));
    col.setString_val(loadMsg);
    response.setLoad_summary(new TResultRow(Lists.newArrayList(col)));
    return response;
}
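
FileSystemUtil here is an Impala-internal helper; the staging move itself reduces to stock FileSystem calls. A simplified sketch under that assumption (it skips the hidden-file filtering that moveAllVisibleFiles performs, and the .tmp_load directory name is illustrative, not Impala's):

import java.io.IOException;

import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

class StagedMove {
    // Move a file, or every direct child file of a directory, into a staging
    // subdirectory of the destination; returns the number of files moved.
    static int moveIntoStaging(FileSystem fs, Path source, Path destDir) throws IOException {
        Path tmpDir = new Path(destDir, ".tmp_load"); // illustrative name
        fs.mkdirs(tmpDir);
        int moved = 0;
        if (fs.isDirectory(source)) {
            for (FileStatus st : fs.listStatus(source)) {
                if (st.isFile() && fs.rename(st.getPath(), new Path(tmpDir, st.getPath().getName()))) {
                    ++moved;
                }
            }
        } else if (fs.rename(source, new Path(tmpDir, source.getName()))) {
            moved = 1;
        }
        return moved;
    }
}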

From source file: se.sics.gvod.stream.system.hops.SetupExperiment.java

License: Open Source License

public static void main(String[] args) throws IOException, HashUtil.HashBuilderException {
    String hopsURL = "hdfs://bbc1.sics.se:26801"; // fs.defaultFS requires a URI scheme
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", hopsURL);
    DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);

    String path = "/experiment";
    if (!fs.isDirectory(new Path(path))) {
        fs.mkdirs(new Path(path));
    } else {
        fs.delete(new Path(path), true);
        fs.mkdirs(new Path(path));
    }
    String uploadDirPath = path + "/upload";
    fs.mkdirs(new Path(uploadDirPath));
    String downloadDirPath = path + "/download";
    fs.mkdirs(new Path(downloadDirPath));

    String dataFile = uploadDirPath + "/file";
    Random rand = new Random(1234);
    try (FSDataOutputStream out = fs.create(new Path(dataFile))) {
        // fileSize and pieceSize are constants defined elsewhere in the
        // original class; each iteration writes one 1024-byte piece.
        for (int i = 0; i < fileSize / pieceSize; i++) {
            byte[] data = new byte[1024];
            rand.nextBytes(data);
            out.write(data);
            out.flush();
        }
        System.err.println("created file - expected:" + fileSize + " created:" + out.size());
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
    fs.close();
}
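
Reusing the fs handle from the listing above: delete() simply returns false when the target is absent, so the isDirectory() probe before the reset is optional. A minimal alternative sketch:

    Path root = new Path("/experiment");
    fs.delete(root, true); // returns false (no-op) if /experiment does not exist
    fs.mkdirs(root);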