Example usage for org.apache.hadoop.fs FileSystem newInstance

Introduction

On this page you can find example usages of org.apache.hadoop.fs.FileSystem.newInstance.

Prototype

public static FileSystem newInstance(Configuration conf) throws IOException 

Document

Returns a unique configured FileSystem implementation for the default filesystem of the supplied configuration. This always returns a new FileSystem object; unlike FileSystem.get(Configuration), the instance is not taken from the JVM-wide FileSystem cache, so the caller is responsible for closing it.
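
A minimal sketch of this contract (the fs.defaultFS value and the /tmp path below are placeholder assumptions, not values from the examples on this page):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NewInstanceExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Placeholder default filesystem; in practice this is usually read
        // from core-site.xml on the classpath.
        conf.set("fs.defaultFS", "hdfs://namenode:8020");

        // newInstance bypasses the FileSystem cache, so closing this instance
        // cannot affect callers sharing a cached instance from FileSystem.get.
        try (FileSystem fs = FileSystem.newInstance(conf)) {
            System.out.println("/tmp exists: " + fs.exists(new Path("/tmp")));
        }
    }
}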

Usage

From source file: org.apache.metamodel.util.HdfsResource.java

License: Apache License

public FileSystem getHadoopFileSystem() {
    try {
        return FileSystem.newInstance(getHadoopConfiguration());
    } catch (IOException e) {
        throw new MetaModelException("Could not connect to HDFS: " + e.getMessage(), e);
    }
}
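
Note that newInstance hands HdfsResource a private, uncached FileSystem, so closing it later cannot disturb other code using the same Configuration; by the same token, the caller of getHadoopFileSystem() is responsible for closing the returned instance.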

From source file: org.apache.metron.common.dsl.functions.resolver.ClasspathFunctionResolverIntegrationTest.java

License: Apache License

@BeforeClass
public static void setup() {
    component = new MRComponent().withBasePath("target");
    component.start();
    configuration = component.getConfiguration();

    try {
        FileSystem fs = FileSystem.newInstance(configuration);
        fs.mkdirs(new Path("/classpath-resources"));
        fs.copyFromLocalFile(new Path("src/test/classpath-resources/custom-1.0-SNAPSHOT.jar"),
                new Path("/classpath-resources"));
    } catch (IOException e) {
        throw new RuntimeException("Unable to start cluster", e);
    }
}

From source file: org.apache.metron.common.utils.HDFSUtils.java

License: Apache License

/**
 * Reads the full contents of an HDFS file into a list of lines. Opens and closes the
 * file system on each call. Never returns null.
 *
 * @param config Hadoop configuration
 * @param path path to the file
 * @return file contents as a List of Strings, one per line
 * @throws IOException
 */
public static List<String> readFile(Configuration config, String path) throws IOException {
    Path hdfsPath = new Path(path);
    // newInstance returns an uncached FileSystem, so close both it and the
    // stream once the lines have been read.
    try (FileSystem fs = FileSystem.newInstance(config);
            FSDataInputStream inputStream = fs.open(hdfsPath)) {
        return IOUtils.readLines(inputStream, "UTF-8");
    }
}
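
A caller-side sketch (the configuration and the file path are hypothetical):

List<String> lines = HDFSUtils.readFile(new Configuration(), "/user/metron/enrichment.json");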

From source file: org.apache.phoenix.hbase.index.write.recovery.TestPerRegionIndexWriteCache.java

License: Apache License

@SuppressWarnings("deprecation")
@Before
public void setUp() throws Exception {
    Path hbaseRootDir = TEST_UTIL.getDataTestDir();
    TEST_UTIL.getConfiguration().set("hbase.rootdir", hbaseRootDir.toString());

    FileSystem newFS = FileSystem.newInstance(TEST_UTIL.getConfiguration());
    Random rn = new Random();
    tableName = TableName.valueOf("TestPerRegion" + rn.nextInt());
    HRegionInfo hri = new HRegionInfo(tableName, null, null, false);
    Path basedir = FSUtils.getTableDir(hbaseRootDir, tableName);
    WALFactory walFactory = new WALFactory(TEST_UTIL.getConfiguration(), null, "TestPerRegionIndexWriteCache");
    wal = walFactory.getWAL(Bytes.toBytes("logs"), null);
    HTableDescriptor htd = new HTableDescriptor(tableName);
    HColumnDescriptor a = new HColumnDescriptor(Bytes.toBytes("a"));
    htd.addFamily(a);

    r1 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
        @Override
        public int hashCode() {
            return 1;
        }

        @Override
        public String toString() {
            return "testRegion1";
        }
    };

    r2 = new HRegion(basedir, wal, newFS, TEST_UTIL.getConfiguration(), hri, htd, null) {
        @Override
        public int hashCode() {
            return 2;
        }

        @Override
        public String toString() {
            return "testRegion1";
        }
    };
}

From source file: org.apache.pirk.responder.wideskies.mapreduce.ColumnMultReducer.java

License: Apache License

@Override
public void setup(Context ctx) throws IOException, InterruptedException {
    super.setup(ctx);

    outputValue = new Text();
    mos = new MultipleOutputs<>(ctx);

    FileSystem fs = FileSystem.newInstance(ctx.getConfiguration());
    String queryDir = ctx.getConfiguration().get("pirMR.queryInputDir");
    query = new HadoopFileSystemStore(fs).recall(queryDir, Query.class);
}
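
This and the remaining Pirk examples below follow one pattern: each mapper or reducer task creates its own uncached FileSystem with newInstance in setup(), then wraps it in a HadoopFileSystemStore to deserialize the Query object from the directory named by the pirMR.queryInputDir property.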

From source file: org.apache.pirk.responder.wideskies.mapreduce.ExpTableMapper.java

License: Apache License

@Override
public void setup(Context ctx) throws IOException, InterruptedException {
    super.setup(ctx);

    valueOut = new Text();

    String queryDir = ctx.getConfiguration().get("pirMR.queryInputDir");
    query = new HadoopFileSystemStore(FileSystem.newInstance(ctx.getConfiguration())).recall(queryDir,
            Query.class);

    int dataPartitionBitSize = query.getQueryInfo().getDataPartitionBitSize();
    maxValue = (int) Math.pow(2, dataPartitionBitSize) - 1;

    NSquared = query.getNSquared();
}

From source file: org.apache.pirk.responder.wideskies.mapreduce.FinalResponseReducer.java

License: Apache License

@Override
public void setup(Context ctx) throws IOException, InterruptedException {
    super.setup(ctx);

    mos = new MultipleOutputs<>(ctx);

    FileSystem fs = FileSystem.newInstance(ctx.getConfiguration());
    storage = new HadoopFileSystemStore(fs);
    String queryDir = ctx.getConfiguration().get("pirMR.queryInputDir");
    Query query = storage.recall(queryDir, Query.class);
    QueryInfo queryInfo = query.getQueryInfo();

    outputFile = ctx.getConfiguration().get("pirMR.outputFile");

    response = new Response(queryInfo);
}

From source file: org.apache.pirk.responder.wideskies.mapreduce.HashSelectorsAndPartitionDataMapper.java

License: Apache License

@Override
public void setup(Context ctx) throws IOException, InterruptedException {
    super.setup(ctx);

    logger.info("Setting up the mapper");

    keyOut = new IntWritable();

    FileSystem fs = FileSystem.newInstance(ctx.getConfiguration());

    // Can make this so that it reads multiple queries at one time...
    String queryDir = ctx.getConfiguration().get("pirMR.queryInputDir");
    Query query = new HadoopFileSystemStore(fs).recall(queryDir, Query.class);
    queryInfo = query.getQueryInfo();

    try {
        SystemConfiguration.setProperty("data.schemas", ctx.getConfiguration().get("data.schemas"));
        SystemConfiguration.setProperty("query.schemas", ctx.getConfiguration().get("query.schemas"));
        SystemConfiguration.setProperty("pir.stopListFile", ctx.getConfiguration().get("pirMR.stopListFile"));

        DataSchemaLoader.initialize(true, fs);
        QuerySchemaLoader.initialize(true, fs);

    } catch (Exception e) {
        e.printStackTrace();
    }

    if (ctx.getConfiguration().get("pir.allowAdHocQuerySchemas", "false").equals("true")) {
        qSchema = queryInfo.getQuerySchema();
    }
    if (qSchema == null) {
        qSchema = QuerySchemaRegistry.get(queryInfo.getQueryType());
    }
    dSchema = DataSchemaRegistry.get(qSchema.getDataSchemaName());

    try {
        filter = qSchema.getFilter();
    } catch (Exception e) {
        e.printStackTrace();
    }
}

From source file: org.apache.pirk.responder.wideskies.mapreduce.RowCalcReducer.java

License: Apache License

@Override
public void setup(Context ctx) throws IOException, InterruptedException {
    super.setup(ctx);

    keyOut = new LongWritable();
    valueOut = new Text();
    mos = new MultipleOutputs<>(ctx);

    fs = FileSystem.newInstance(ctx.getConfiguration());
    String queryDir = ctx.getConfiguration().get("pirMR.queryInputDir");
    query = new HadoopFileSystemStore(fs).recall(queryDir, Query.class);
    queryInfo = query.getQueryInfo();

    try {
        SystemConfiguration.setProperty("data.schemas", ctx.getConfiguration().get("data.schemas"));
        SystemConfiguration.setProperty("query.schemas", ctx.getConfiguration().get("query.schemas"));
        SystemConfiguration.setProperty("pir.stopListFile", ctx.getConfiguration().get("pirMR.stopListFile"));

        DataSchemaLoader.initialize(true, fs);
        QuerySchemaLoader.initialize(true, fs);

    } catch (Exception e) {
        e.printStackTrace();
    }

    if (ctx.getConfiguration().get("pirWL.useLocalCache").equals("true")) {
        useLocalCache = true;
    }
    if (ctx.getConfiguration().get("pirWL.limitHitsPerSelector").equals("true")) {
        limitHitsPerSelector = true;
    }
    maxHitsPerSelector = Integer.parseInt(ctx.getConfiguration().get("pirWL.maxHitsPerSelector"));

    logger.info("RowCalcReducer -- useLocalCache = " + useLocalCache + " limitHitsPerSelector =  "
            + limitHitsPerSelector + " maxHitsPerSelector = " + maxHitsPerSelector);
}

From source file: org.datacleaner.spark.ApplicationDriver.java

License: Open Source License

private List<String> buildJarFiles(MutableRef<String> primaryJarRef) throws IOException {
    final List<String> list = new ArrayList<>();

    final Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://" + _hostname + ":" + _port);

    final FileSystem fs = FileSystem.newInstance(conf);
    try {
        final Path directoryPath = new Path(_jarDirectoryPath);
        final RemoteIterator<LocatedFileStatus> files = fs.listFiles(directoryPath, false);
        while (files.hasNext()) {
            final LocatedFileStatus file = files.next();
            final Path path = file.getPath();
            final String filename = path.getName();
            if (filename.startsWith(PRIMARY_JAR_FILENAME_PREFIX)) {
                primaryJarRef.set(path.toString());
            } else {
                list.add(path.toString());
            }
        }
    } finally {
        FileHelper.safeClose(fs);
    }

    if (primaryJarRef.get() == null) {
        throw new IllegalArgumentException("Failed to find primary jar (starting with '"
                + PRIMARY_JAR_FILENAME_PREFIX + "') in JAR file directory: " + _jarDirectoryPath);
    }

    return list;
}
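
Using newInstance here is a deliberate pairing with FileHelper.safeClose(fs) in the finally block: because the instance is uncached and private to this method, closing it cannot invalidate a filesystem that other code in the JVM obtained from the shared cache via FileSystem.get.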