Example usage for org.apache.hadoop.fs FileSystem - subclassing

List of usage examples showing classes that subclass org.apache.hadoop.fs.FileSystem

Introduction

This page collects examples of classes that extend org.apache.hadoop.fs.FileSystem, excerpted from open-source projects. Each entry names the source file and shows the opening lines of the subclass.
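
As background for the excerpts below, a subclass of FileSystem must implement a fixed set of abstract methods (the exact set varies slightly between Hadoop versions). The following is a minimal sketch, not taken from any of the projects listed here: a hypothetical EmptyFileSystem for a made-up "empty" scheme that stubs out every required method.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.util.Progressable;

/** Hypothetical no-op file system for the made-up "empty" scheme. */
public class EmptyFileSystem extends FileSystem {

    private URI uri = URI.create("empty:///");
    private Path workingDir = new Path("/");

    @Override
    public void initialize(URI name, Configuration conf) throws IOException {
        super.initialize(name, conf); // sets up per-scheme statistics; call first
        this.uri = name;
    }

    @Override
    public URI getUri() {
        return uri;
    }

    @Override
    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
        throw new IOException("open not supported: " + f);
    }

    @Override
    public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
            int bufferSize, short replication, long blockSize, Progressable progress)
            throws IOException {
        throw new IOException("create not supported: " + f);
    }

    @Override
    public FSDataOutputStream append(Path f, int bufferSize, Progressable progress)
            throws IOException {
        throw new IOException("append not supported: " + f);
    }

    @Override
    public boolean rename(Path src, Path dst) throws IOException {
        return false;
    }

    @Override
    public boolean delete(Path f, boolean recursive) throws IOException {
        return false;
    }

    @Override
    public FileStatus[] listStatus(Path f) throws IOException {
        return new FileStatus[0]; // pretend every directory is empty
    }

    @Override
    public void setWorkingDirectory(Path newDir) {
        this.workingDir = newDir;
    }

    @Override
    public Path getWorkingDirectory() {
        return workingDir;
    }

    @Override
    public boolean mkdirs(Path f, FsPermission permission) throws IOException {
        return true;
    }

    @Override
    public FileStatus getFileStatus(Path f) throws IOException {
        // Report every path as an empty directory.
        return new FileStatus(0, true, 1, 0, 0, f);
    }
}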

Usage

From source file org.apache.hive.common.util.MockFileSystem.java

public class MockFileSystem extends FileSystem {
    final List<MockFile> files = new ArrayList<MockFile>();
    final Map<MockFile, FileStatus> fileStatusMap = new HashMap<>();
    Path workingDir = new Path("/");
    // statics for when the mock fs is created via FileSystem.get
    private static String blockedUgi = null;

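The comment about FileSystem.get hints at how a test file system like this is normally wired up: Hadoop resolves a URI scheme to an implementation class through the fs.<scheme>.impl configuration key. A minimal sketch, assuming the mock is registered under a hypothetical "mock" scheme (the scheme name is illustrative, not taken from the Hive source):

Configuration conf = new Configuration();
// Map the made-up "mock" scheme to the subclass so FileSystem.get can instantiate it.
conf.set("fs.mock.impl", MockFileSystem.class.getName());
// Skip the shared FileSystem cache so every test gets a fresh instance.
conf.setBoolean("fs.mock.impl.disable.cache", true);
FileSystem fs = FileSystem.get(URI.create("mock:///"), conf);
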
From source file org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem.java

/**
 * {@code IGFS} Hadoop 1.x file system driver over file system API. To use
 * {@code IGFS} as Hadoop file system, you should configure this class
 * in Hadoop's {@code core-site.xml} as follows:
 * <pre name="code" class="xml">
 *  &lt;property&gt;

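The javadoc excerpt breaks off before the actual property element. As a hedged illustration of the same registration done programmatically (the fs.igfs.impl key and the igfs:/// URI are assumptions based on the driver's scheme, not copied from the excerpt):

Configuration conf = new Configuration();
// Map the assumed "igfs" scheme to the Ignite driver class named above.
conf.set("fs.igfs.impl", "org.apache.ignite.hadoop.fs.v1.IgniteHadoopFileSystem");
FileSystem igfs = FileSystem.get(URI.create("igfs:///"), conf);
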
From source file org.apache.ignite.igfs.hadoop.v1.IgfsHadoopFileSystem.java

/**
 * {@code IGFS} Hadoop 1.x file system driver over file system API. To use
 * {@code IGFS} as Hadoop file system, you should configure this class
 * in Hadoop's {@code core-site.xml} as follows:
 * <pre name="code" class="xml">
 *  &lt;property&gt;

From source file org.apache.ignite.internal.processors.hadoop.fs.GridHadoopRawLocalFileSystem.java

/**
 * Local file system implementation for Hadoop.
 */
public class GridHadoopRawLocalFileSystem extends FileSystem {
    /** Working directory for each thread. */
    private final ThreadLocal<Path> workDir = new ThreadLocal<Path>() {

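The workDir field illustrates a pattern worth noting: a single FileSystem instance is shared by many threads, so the raw local implementation keeps one working directory per thread via ThreadLocal.initialValue(). A self-contained sketch of that pattern (the class name and default path are illustrative):

import org.apache.hadoop.fs.Path;

class PerThreadWorkingDir {
    private final ThreadLocal<Path> workDir = new ThreadLocal<Path>() {
        @Override
        protected Path initialValue() {
            return new Path("/"); // each thread starts from its own default directory
        }
    };

    Path get() {
        return workDir.get();
    }

    void set(Path dir) {
        workDir.set(dir);
    }
}
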
From source file org.apache.ignite.internal.processors.hadoop.fs.HadoopRawLocalFileSystem.java

/**
 * Local file system implementation for Hadoop.
 */
public class HadoopRawLocalFileSystem extends FileSystem {
    /** Working directory for each thread. */
    private final ThreadLocal<Path> workDir = new ThreadLocal<Path>() {

From source file org.apache.ignite.internal.processors.hadoop.impl.fs.HadoopRawLocalFileSystem.java

/**
 * Local file system implementation for Hadoop.
 */
public class HadoopRawLocalFileSystem extends FileSystem {
    /** Working directory for each thread. */
    private final ThreadLocal<Path> workDir = new ThreadLocal<Path>() {

From source file org.apache.lucene.cassandra.fs.CassandraFileSystem.java

public class CassandraFileSystem extends FileSystem {
    private static final Logger logger = Logger.getLogger(CassandraFileSystem.class);

    private URI uri;

    public final CassandraFileSystemStore store;

From source file org.apache.orc.bench.core.NullFileSystem.java

public class NullFileSystem extends FileSystem {
    @Override
    public URI getUri() {
        try {
            return new URI("null:///");
        } catch (URISyntaxException e) {

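The excerpt stops inside the catch block. A common way to express the same constant URI without the checked exception is URI.create, which wraps any URISyntaxException in an unchecked IllegalArgumentException; a minimal sketch of that idiom (not the actual ORC code):

@Override
public URI getUri() {
    // URI.create throws an unchecked exception on bad syntax, so no try/catch
    // is needed for a constant like "null:///".
    return URI.create("null:///");
}
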
From source file org.apache.tajo.storage.s3.MockS3FileSystem.java

public class MockS3FileSystem extends FileSystem {
    private URI uri;

    @Override
    public void initialize(URI uri, Configuration conf) throws IOException {
        super.initialize(uri, conf);

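Calling super.initialize first matters because the base class sets up per-scheme statistics before the subclass does its own work. A hedged sketch of how the rest of such an override typically continues (the field handling is illustrative, not taken from the Tajo source):

@Override
public void initialize(URI uri, Configuration conf) throws IOException {
    super.initialize(uri, conf); // base class wires up statistics for the scheme
    setConf(conf);               // FileSystem extends Configured, so keep the conf around
    this.uri = uri;              // remembered so getUri() can return it later
}
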
From source file org.gridgain.grid.ggfs.hadoop.v1.GridGgfsHadoopFileSystem.java

/**
 * {@code GGFS} Hadoop 1.x file system driver over file system API. To use
 * {@code GGFS} as Hadoop file system, you should configure this class
 * in Hadoop's {@code core-site.xml} as follows:
 * <pre name="code" class="xml">
 *  &lt;property&gt;