Example usage for org.apache.hadoop.fs FileSystem getServerDefaults

List of usage examples for org.apache.hadoop.fs FileSystem getServerDefaults

Introduction

This page lists example usage of org.apache.hadoop.fs FileSystem getServerDefaults.

Prototype

public FsServerDefaults getServerDefaults(Path p) throws IOException 

Document

Return a set of server default configuration values.
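
The returned FsServerDefaults carries server-side values such as block size, replication factor, file buffer size, and trash interval. A minimal sketch of querying them (the fs.defaultFS URI and the path are placeholder assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;

public class ServerDefaultsExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:9000"); // assumed NameNode URI

        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/tmp"); // hypothetical path

        // Ask the server (not the client-side config) for its default values.
        FsServerDefaults defaults = fs.getServerDefaults(p);
        System.out.println("block size:       " + defaults.getBlockSize());
        System.out.println("replication:      " + defaults.getReplication());
        System.out.println("file buffer size: " + defaults.getFileBufferSize());
        System.out.println("trash interval:   " + defaults.getTrashInterval());
    }
}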

Usage

From source file: com.ruizhan.hadoop.hdfs.Trash.java

License: Apache License

/**
 * In the case of symlinks or mount points, one has to move the item to the
 * appropriate trash bin in the actual volume of the path p being deleted.
 *
 * Hence we get the file system of the fully-qualified resolved path and
 * then move the path p to the trash bin in that volume.
 * @param fs - the filesystem of path p
 * @param p - the path being deleted - to be moved to trash
 * @param conf - configuration
 * @return false if the item is already in the trash or trash is disabled
 * @throws IOException on error
 */
public static boolean moveToAppropriateTrash(FileSystem fs, Path p, Configuration conf) throws IOException {
    Path fullyResolvedPath = fs.resolvePath(p);
    FileSystem fullyResolvedFs = FileSystem.get(fullyResolvedPath.toUri(), conf);
    // If the trash interval is configured server side then clobber this
    // configuration so that we always respect the server configuration.
    try {
        long trashInterval = fullyResolvedFs.getServerDefaults(fullyResolvedPath).getTrashInterval();
        if (0 != trashInterval) {
            Configuration confCopy = new Configuration(conf);
            confCopy.setLong(CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY, trashInterval);
            conf = confCopy;
        }
    } catch (Exception e) {
        // If we can not determine that trash is enabled server side then
        // bail rather than potentially deleting a file when trash is enabled.
        throw new IOException("Failed to get server trash configuration", e);
    }
    Trash trash = new Trash(fullyResolvedFs, conf);
    boolean success = trash.moveToTrash(fullyResolvedPath);
    if (success) {
        System.out.println("Moved: '" + p + "' to trash at: " + trash.getCurrentTrashDir());
    }
    return success;
}
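
A caller would typically route deletions through this helper rather than calling delete directly, so that server-side trash settings are honored. A minimal usage sketch (the path is hypothetical, and the permanent-delete fallback is an assumption about the caller's policy):

public static void deleteWithTrash(Path toDelete, Configuration conf) throws IOException {
    FileSystem fs = toDelete.getFileSystem(conf);
    if (!Trash.moveToAppropriateTrash(fs, toDelete, conf)) {
        // Trash is disabled or the item is already in it; delete permanently.
        fs.delete(toDelete, true);
    }
}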

From source file: org.apache.solr.store.hdfs.HdfsFileWriter.java

License: Apache License

public HdfsFileWriter(FileSystem fileSystem, Path path) throws IOException {
    LOG.debug("Creating writer on {}", path);
    this.path = path;

    Configuration conf = fileSystem.getConf();
    FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);
    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE);
    if (Boolean.getBoolean(HDFS_SYNC_BLOCK)) {
        flags.add(CreateFlag.SYNC_BLOCK);
    }
    outputStream = fileSystem.create(path, FsPermission.getDefault().applyUMask(FsPermission.getUMask(conf)),
            flags, fsDefaults.getFileBufferSize(), fsDefaults.getReplication(), fsDefaults.getBlockSize(),
            null);
}
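
Passing the server-reported buffer size, replication, and block size into create keeps newly written files consistent with cluster-side policy even when the client's Configuration differs. A usage sketch (it assumes HDFS_SYNC_BLOCK is a public constant naming the JVM system property read by the Boolean.getBoolean check above):

public static HdfsFileWriter openWriter(FileSystem fs, Path path, boolean syncBlock) throws IOException {
    if (syncBlock) {
        // Assumed: HDFS_SYNC_BLOCK is the public constant checked in the constructor.
        System.setProperty(HdfsFileWriter.HDFS_SYNC_BLOCK, "true");
    }
    // The constructor calls getServerDefaults to size buffers, replication, and blocks.
    return new HdfsFileWriter(fs, path);
}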