Example usage for org.apache.hadoop.fs FileSystem setDefaultUri

Introduction

On this page you can find example usage for org.apache.hadoop.fs FileSystem setDefaultUri.

Prototype

public static void setDefaultUri(Configuration conf, String uri) 

Document

Set the default FileSystem URI in a configuration.
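
The method stores the URI under the default-filesystem key (fs.defaultFS in current Hadoop, fs.default.name in older releases), so later calls to FileSystem.get(conf) resolve against it. A minimal, self-contained sketch, in which the hdfs://localhost:9000 address and the class name are illustrative placeholders rather than values from the examples below:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;

public class SetDefaultUriExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Make hdfs://localhost:9000 the default FileSystem for this
        // configuration; scheme-less paths will now resolve against it.
        FileSystem.setDefaultUri(conf, "hdfs://localhost:9000");
        FileSystem fs = FileSystem.get(conf);
        System.out.println("Default FileSystem URI: " + fs.getUri());
    }
}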

Usage

From source file: org.lilyproject.hadooptestfw.fork.MiniMRCluster.java

License: Apache License

static JobConf configureJobConf(JobConf conf, String namenode, int jobTrackerPort, int jobTrackerInfoPort,
        UserGroupInformation ugi) {
    JobConf result = new JobConf(conf);
    FileSystem.setDefaultUri(result, namenode);
    result.set("mapred.job.tracker", "localhost:" + jobTrackerPort);
    result.set("mapred.job.tracker.http.address", "127.0.0.1:" + jobTrackerInfoPort);
    // for debugging have all task output sent to the test output
    JobClient.setTaskOutputFilter(result, JobClient.TaskStatusFilter.ALL);
    return result;
}
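
The clone-and-set pattern here leaves the caller's JobConf untouched: the copy gets the mini cluster's namenode as its default FileSystem plus the test job tracker addresses, so jobs submitted with it run entirely against the test cluster.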

From source file: org.springframework.data.hadoop.impala.common.ConfigurationCommands.java

License: Apache License

@CliCommand(value = { PREFIX + "fs" }, help = "Sets the Hadoop namenode - can be 'local' or <namenode:port>")
public void setFs(@CliOption(key = { "",
        "namenode" }, mandatory = true, help = "Namenode address - local|<namenode:port>") String namenode) {
    FileSystem.setDefaultUri(hadoopConfiguration, namenode);
}
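
The help text offers 'local' or a bare <namenode:port> because the String overload tolerates these legacy names: in the Hadoop versions this code targets, setDefaultUri normalizes 'local' to file:/// and a scheme-less host:port to an hdfs:// URI before storing it.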

From source file: org.springframework.data.hadoop.test.support.StandaloneHadoopCluster.java

License: Apache License

@SuppressWarnings("deprecation")
@Override
public void start() throws IOException {
    log.info("Checking if cluster=" + clusterName + " needs to be started");
    synchronized (this.startupShutdownMonitor) {
        if (started) {
            return;
        }

        // TODO: fix for MAPREDUCE-2785
        // I don't like doing it like this at all, but until we
        // find a better way for the fix, just set the system
        // property. Who knows what kind of problems this will
        // cause! Keeping this here as a reminder for whoever
        // cleans up the mess.
        String tmpDir = getTmpDir();
        System.setProperty("hadoop.log.dir", tmpDir);

        // need to get unique dir per cluster
        System.setProperty("test.build.data", "build/test/data/" + clusterName);

        log.info("Starting cluster=" + clusterName);

        Configuration config = new JobConf();

        // umask trick
        String umask = getCurrentUmask(tmpDir, config);
        if (umask != null) {
            log.info("Setting expected umask to " + umask);
            config.set("dfs.datanode.data.dir.perm", umask);
        }

        // The DFS cluster updates the config. Newer DFS clusters
        // use a builder pattern, but we need to support older
        // versions with the same code; there will be ramifications
        // if the deprecated methods are removed in the future.
        dfsCluster = new MiniDFSCluster(config, nodes, true, null);

        // We need to ask the MR cluster for its config after init
        // returns; it is not guaranteed that the passed-in config
        // has been updated. We do all this via a compatibility class
        // that uses reflection, because at this point we don't know
        // the exact runtime implementation.

        FileSystem fs = dfsCluster.getFileSystem();
        log.info("Dfs cluster uri= " + fs.getUri());

        mrClusterObject = MiniMRClusterCompat.instantiateCluster(this.getClass(), nodes, config, fs,
                this.getClass().getClassLoader());

        configuration = MiniMRClusterCompat.getConfiguration(mrClusterObject);

        // set default uri again in case it wasn't updated
        FileSystem.setDefaultUri(configuration, fs.getUri());

        log.info("Started cluster=" + clusterName);
        started = true;
    }
}
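
Note the second setDefaultUri call near the end: the configuration is fetched back from the MR cluster through the reflection-based compatibility class, and because the mini clusters are not guaranteed to propagate the namenode URI into it, the URI reported by the running DFS cluster is applied once more.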

From source file: org.springframework.xd.integration.util.HadoopUtils.java

License: Apache License

/**
 * Initializes the hadoop utils with the name node that was specified in the environment.
 *
 * @param xdEnvironment the environment for the test.
 */
public HadoopUtils(XdEnvironment xdEnvironment) {
    Assert.notNull(xdEnvironment, "xdEnvironment can not be null");
    this.nameNode = xdEnvironment.getNameNode();

    hadoopConfiguration = new Configuration();
    FileSystem.setDefaultUri(hadoopConfiguration, nameNode);
    shell = new FsShell(hadoopConfiguration);
    dataNodePort = xdEnvironment.getDataNodePort();
}

From source file: org.springframework.xd.shell.hadoop.ConfigurationCommands.java

License: Apache License

@CliCommand(value = { PREFIX + "fs" }, help = "Sets the Hadoop namenode")
public void setFs(@CliOption(key = { "",
        "namenode" }, mandatory = true, help = "namenode URL - can be file:///|hdfs://<namenode>:<port>|webhdfs://<namenode>:<port>") String namenode) {
    FileSystem.setDefaultUri(hadoopConfiguration, namenode);
}

From source file: wanggang1987.bigdataapi.hadoopapi.HadoopClientAPI.java

public void init(String usr, String hdfsURL) {
    try {
        // Simple-auth Hadoop honors the HADOOP_USER_NAME property
        // (or environment variable) when deciding which user to act as.
        System.setProperty("HADOOP_USER_NAME", usr);
        HADOOP_URL = hdfsURL;
        FileSystem.setDefaultUri(conf, HADOOP_URL);
        fs = FileSystem.get(conf);
        hdfs = (DistributedFileSystem) fs;
    } catch (Exception e) {
        logger.error("init failed", e);
    }
}
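
The cast to DistributedFileSystem succeeds only because the URI handed to setDefaultUri uses the hdfs:// scheme; FileSystem.get(conf) instantiates whichever FileSystem implementation is registered for the scheme of the default URI.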