Example usage for java.net InetSocketAddress getHostName

Introduction

On this page you can find example usages of java.net.InetSocketAddress.getHostName().

Prototype

public final String getHostName() 

Document

Gets the hostname.
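
getHostName() returns the hostname part of the address; if the InetSocketAddress was created from a literal IP address, the call may trigger a reverse DNS lookup, whereas getHostString() (Java 7+) returns the literal with no lookup. The following is a minimal, self-contained sketch of that behavior; it is not taken from the sources below, and the host names and ports are illustrative only:

import java.net.InetSocketAddress;

public class GetHostNameExample {
    public static void main(String[] args) {
        // Created from a host name: getHostName() simply returns that name.
        // (Constructing with a host name attempts to resolve it, so this needs DNS.)
        InetSocketAddress byName = new InetSocketAddress("example.com", 80);
        System.out.println(byName.getHostName()); // example.com
        System.out.println(byName.getPort());     // 80

        // Created from a literal IP: getHostName() may perform a reverse lookup,
        // while getHostString() returns the literal as given, with no lookup.
        InetSocketAddress byIp = new InetSocketAddress("127.0.0.1", 8080);
        System.out.println(byIp.getHostName());   // typically "localhost"
        System.out.println(byIp.getHostString()); // 127.0.0.1
    }
}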

Usage

From source file: org.apache.hadoop.dfs.FSNamesystem.java

/**
 * Initialize FSNamesystem.
 */
private void initialize(NameNode nn, Configuration conf) throws IOException {
    this.systemStart = now();
    this.startTime = new Date(systemStart);
    setConfigurationParameters(conf);

    this.localMachine = nn.getNameNodeAddress().getHostName();
    this.port = nn.getNameNodeAddress().getPort();
    this.registerMBean(conf); // register the MBean for the FSNamesystem status
    this.dir = new FSDirectory(this, conf);
    StartupOption startOpt = NameNode.getStartupOption(conf);
    this.dir.loadFSImage(getNamespaceDirs(conf), startOpt);
    long timeTakenToLoadFSImage = now() - systemStart;
    LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
    NameNode.getNameNodeMetrics().fsImageLoadTime.set((int) timeTakenToLoadFSImage);
    this.safeMode = new SafeModeInfo(conf);
    setBlockTotal();
    pendingReplications = new PendingReplicationBlocks(
            conf.getInt("dfs.replication.pending.timeout.sec", -1) * 1000L);
    this.hbthread = new Daemon(new HeartbeatMonitor());
    this.lmthread = new Daemon(leaseManager.new Monitor());
    this.replthread = new Daemon(new ReplicationMonitor());
    this.resthread = new Daemon(new ResolutionMonitor());
    hbthread.start();
    lmthread.start();
    replthread.start();
    resthread.start();

    this.hostsReader = new HostsFileReader(conf.get("dfs.hosts", ""), conf.get("dfs.hosts.exclude", ""));
    this.dnthread = new Daemon(
            new DecommissionManager(this).new Monitor(conf.getInt("dfs.namenode.decommission.interval", 30),
                    conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5)));
    dnthread.start();

    this.dnsToSwitchMapping = (DNSToSwitchMapping) ReflectionUtils.newInstance(conf.getClass(
            "topology.node.switch.mapping.impl", ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);

    String infoAddr = NetUtils.getServerAddress(conf, "dfs.info.bindAddress", "dfs.info.port",
            "dfs.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new StatusHttpServer("dfs", infoHost, tmpInfoPort, tmpInfoPort == 0);
    InetSocketAddress secInfoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(conf);
    sslConf.addResource(conf.get("https.keystore.info.rsrc", "sslinfo.xml"));
    String keyloc = sslConf.get("https.keystore.location");
    if (null != keyloc) {
        this.infoServer.addSslListener(secInfoSocAddr, keyloc, sslConf.get("https.keystore.password", ""),
                sslConf.get("https.keystore.keypassword", ""));
    }
    // assume same ssl port for all datanodes
    InetSocketAddress datanodeSslPort = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 50475));
    this.infoServer.setAttribute("datanode.https.port", datanodeSslPort.getPort());
    this.infoServer.setAttribute("name.node", nn);
    this.infoServer.setAttribute("name.system.image", getFSImage());
    this.infoServer.setAttribute("name.conf", conf);
    this.infoServer.addServlet("fsck", "/fsck", FsckServlet.class);
    this.infoServer.addServlet("getimage", "/getimage", GetImageServlet.class);
    this.infoServer.addServlet("listPaths", "/listPaths/*", ListPathsServlet.class);
    this.infoServer.addServlet("data", "/data/*", FileDataServlet.class);
    this.infoServer.start();

    // The web-server port can be ephemeral... ensure we have the correct info
    this.infoPort = this.infoServer.getPort();
    conf.set("dfs.http.address", infoHost + ":" + infoPort);
    LOG.info("Web-server up at: " + infoHost + ":" + infoPort);
}

From source file: org.apache.hadoop.mapred.JobTracker.java

JobTracker(final JobConf conf, String identifier, Clock clock, QueueManager qm)
        throws IOException, InterruptedException {
    this.queueManager = qm;
    this.clock = clock;
    // Set ports, start RPC servers, setup security policy etc.
    InetSocketAddress addr = getAddress(conf);
    this.localMachine = addr.getHostName();
    this.port = addr.getPort();
    // find the owner of the process
    // get the desired principal to load
    UserGroupInformation.setConfiguration(conf);
    SecurityUtil.login(conf, JT_KEYTAB_FILE, JT_USER_NAME, localMachine);

    long secretKeyInterval = conf.getLong(DELEGATION_KEY_UPDATE_INTERVAL_KEY,
            DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT);
    long tokenMaxLifetime = conf.getLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY,
            DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT);
    long tokenRenewInterval = conf.getLong(DELEGATION_TOKEN_RENEW_INTERVAL_KEY,
            DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT);
    secretManager = new DelegationTokenSecretManager(secretKeyInterval, tokenMaxLifetime, tokenRenewInterval,
            DELEGATION_TOKEN_GC_INTERVAL);
    secretManager.startThreads();

    MAX_JOBCONF_SIZE = conf.getLong(MAX_USER_JOBCONF_SIZE_KEY, MAX_JOBCONF_SIZE);
    //
    // Grab some static constants
    //
    TASKTRACKER_EXPIRY_INTERVAL = conf.getLong("mapred.tasktracker.expiry.interval", 10 * 60 * 1000);
    RETIRE_JOB_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.interval", 24 * 60 * 60 * 1000);
    RETIRE_JOB_CHECK_INTERVAL = conf.getLong("mapred.jobtracker.retirejob.check", 60 * 1000);
    retiredJobsCacheSize = conf.getInt("mapred.job.tracker.retiredjobs.cache.size", 1000);
    MAX_COMPLETE_USER_JOBS_IN_MEMORY = conf.getInt("mapred.jobtracker.completeuserjobs.maximum", 100);

    // values related to heuristic graylisting (a "fault" is a per-job
    // blacklisting; too many faults => node is graylisted across all jobs):
    TRACKER_FAULT_TIMEOUT_WINDOW = // 3 hours
            conf.getInt("mapred.jobtracker.blacklist.fault-timeout-window", 3 * 60);
    TRACKER_FAULT_BUCKET_WIDTH = // 15 minutes
            conf.getInt("mapred.jobtracker.blacklist.fault-bucket-width", 15);
    TRACKER_FAULT_THRESHOLD = conf.getInt("mapred.max.tracker.blacklists", 4);
    // future:  rename to "mapred.jobtracker.blacklist.fault-threshold" for
    // namespace consistency

    if (TRACKER_FAULT_BUCKET_WIDTH > TRACKER_FAULT_TIMEOUT_WINDOW) {
        TRACKER_FAULT_BUCKET_WIDTH = TRACKER_FAULT_TIMEOUT_WINDOW;
    }
    TRACKER_FAULT_BUCKET_WIDTH_MSECS = (long) TRACKER_FAULT_BUCKET_WIDTH * 60 * 1000;

    // ideally, TRACKER_FAULT_TIMEOUT_WINDOW should be an integral multiple of
    // TRACKER_FAULT_BUCKET_WIDTH, but round up just in case:
    NUM_FAULT_BUCKETS = (TRACKER_FAULT_TIMEOUT_WINDOW + TRACKER_FAULT_BUCKET_WIDTH - 1)
            / TRACKER_FAULT_BUCKET_WIDTH;

    NUM_HEARTBEATS_IN_SECOND = conf.getInt(JT_HEARTBEATS_IN_SECOND, DEFAULT_NUM_HEARTBEATS_IN_SECOND);
    if (NUM_HEARTBEATS_IN_SECOND < MIN_NUM_HEARTBEATS_IN_SECOND) {
        NUM_HEARTBEATS_IN_SECOND = DEFAULT_NUM_HEARTBEATS_IN_SECOND;
    }

    HEARTBEATS_SCALING_FACTOR = conf.getFloat(JT_HEARTBEATS_SCALING_FACTOR, DEFAULT_HEARTBEATS_SCALING_FACTOR);
    if (HEARTBEATS_SCALING_FACTOR < MIN_HEARTBEATS_SCALING_FACTOR) {
        HEARTBEATS_SCALING_FACTOR = DEFAULT_HEARTBEATS_SCALING_FACTOR;
    }

    // This configuration is there solely for tuning purposes and
    // once this feature has been tested in real clusters and an appropriate
    // value for the threshold has been found, this config might be taken out.
    AVERAGE_BLACKLIST_THRESHOLD = conf.getFloat("mapred.cluster.average.blacklist.threshold", 0.5f);

    // This is a directory of temporary submission files.  We delete it
    // on startup, and can delete any files that we're done with
    this.conf = conf;
    JobConf jobConf = new JobConf(conf);

    initializeTaskMemoryRelatedConfig();

    // Read the hosts/exclude files to restrict access to the jobtracker.
    this.hostsReader = new HostsFileReader(conf.get("mapred.hosts", ""), conf.get("mapred.hosts.exclude", ""));
    aclsManager = new ACLsManager(conf, new JobACLsManager(conf), queueManager);

    LOG.info("Starting jobtracker with owner as " + getMROwner().getShortUserName());

    // Create the scheduler
    Class<? extends TaskScheduler> schedulerClass = conf.getClass("mapred.jobtracker.taskScheduler",
            JobQueueTaskScheduler.class, TaskScheduler.class);
    taskScheduler = (TaskScheduler) ReflectionUtils.newInstance(schedulerClass, conf);

    // Set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new MapReducePolicyProvider());
    }

    int handlerCount = conf.getInt("mapred.job.tracker.handler.count", 10);
    this.interTrackerServer = RPC.getServer(this, addr.getHostName(), addr.getPort(), handlerCount, false, conf,
            secretManager);
    if (LOG.isDebugEnabled()) {
        Properties p = System.getProperties();
        for (Iterator it = p.keySet().iterator(); it.hasNext();) {
            String key = (String) it.next();
            String val = p.getProperty(key);
            LOG.debug("Property '" + key + "' is " + val);
        }
    }

    String infoAddr = NetUtils.getServerAddress(conf, "mapred.job.tracker.info.bindAddress",
            "mapred.job.tracker.info.port", "mapred.job.tracker.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoBindAddress = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.startTime = clock.getTime();
    infoServer = new HttpServer("job", infoBindAddress, tmpInfoPort, tmpInfoPort == 0, conf,
            aclsManager.getAdminsAcl());
    infoServer.setAttribute("job.tracker", this);
    // initialize history parameters.
    final JobTracker jtFinal = this;
    getMROwner().doAs(new PrivilegedExceptionAction<Boolean>() {
        @Override
        public Boolean run() throws Exception {
            JobHistory.init(jtFinal, conf, jtFinal.localMachine, jtFinal.startTime);
            return true;
        }
    });

    infoServer.addServlet("reducegraph", "/taskgraph", TaskGraphServlet.class);
    infoServer.start();

    this.trackerIdentifier = identifier;

    createInstrumentation();

    // The rpc/web-server ports can be ephemeral ports... 
    // ... ensure we have the correct info
    this.port = interTrackerServer.getListenerAddress().getPort();
    this.conf.set("mapred.job.tracker", (this.localMachine + ":" + this.port));
    this.localFs = FileSystem.getLocal(conf);
    LOG.info("JobTracker up at: " + this.port);
    this.infoPort = this.infoServer.getPort();
    this.conf.set("mapred.job.tracker.http.address", infoBindAddress + ":" + this.infoPort);
    LOG.info("JobTracker webserver: " + this.infoServer.getPort());

    // start the recovery manager
    recoveryManager = new RecoveryManager();

    while (!Thread.currentThread().isInterrupted()) {
        try {
            // if we haven't contacted the namenode go ahead and do it
            if (fs == null) {
                fs = getMROwner().doAs(new PrivilegedExceptionAction<FileSystem>() {
                    public FileSystem run() throws IOException {
                        return FileSystem.get(conf);
                    }
                });
            }
            // clean up the system dir, which will only work if hdfs is out of 
            // safe mode
            if (systemDir == null) {
                systemDir = new Path(getSystemDir());
            }
            try {
                FileStatus systemDirStatus = fs.getFileStatus(systemDir);
                if (!systemDirStatus.getOwner().equals(getMROwner().getShortUserName())) {
                    throw new AccessControlException("The systemdir " + systemDir + " is not owned by "
                            + getMROwner().getShortUserName());
                }
                if (!systemDirStatus.getPermission().equals(SYSTEM_DIR_PERMISSION)) {
                    LOG.warn("Incorrect permissions on " + systemDir + ". Setting it to "
                            + SYSTEM_DIR_PERMISSION);
                    fs.setPermission(systemDir, new FsPermission(SYSTEM_DIR_PERMISSION));
                }
            } catch (FileNotFoundException fnf) {
            } //ignore
            // Make sure that the backup data is preserved
            FileStatus[] systemDirData = fs.listStatus(this.systemDir);
            // Check if history is enabled, as we can't have persistence with
            // history disabled
            if (conf.getBoolean("mapred.jobtracker.restart.recover", false) && systemDirData != null) {
                for (FileStatus status : systemDirData) {
                    try {
                        recoveryManager.checkAndAddJob(status);
                    } catch (Throwable t) {
                        LOG.warn("Failed to add the job " + status.getPath().getName(), t);
                    }
                }

                // Check if there are jobs to be recovered
                hasRestarted = recoveryManager.shouldRecover();
                if (hasRestarted) {
                    break; // if there is something to recover else clean the sys dir
                }
            }
            LOG.info("Cleaning up the system directory");
            fs.delete(systemDir, true);
            if (FileSystem.mkdirs(fs, systemDir, new FsPermission(SYSTEM_DIR_PERMISSION))) {
                break;
            }
            LOG.error("Mkdirs failed to create " + systemDir);
        } catch (AccessControlException ace) {
            LOG.warn("Failed to operate on mapred.system.dir (" + systemDir + ") because of permissions.");
            LOG.warn(
                    "Manually delete the mapred.system.dir (" + systemDir + ") and then start the JobTracker.");
            LOG.warn("Bailing out ... ", ace);
            throw ace;
        } catch (IOException ie) {
            LOG.info("problem cleaning system directory: " + systemDir, ie);
        }
        Thread.sleep(FS_ACCESS_RETRY_PERIOD);
    }

    if (Thread.currentThread().isInterrupted()) {
        throw new InterruptedException();
    }

    // Same with 'localDir' except it's always on the local disk.
    if (!hasRestarted) {
        jobConf.deleteLocalFiles(SUBDIR);
    }

    // Initialize history DONE folder
    FileSystem historyFS = getMROwner().doAs(new PrivilegedExceptionAction<FileSystem>() {
        public FileSystem run() throws IOException {
            JobHistory.initDone(conf, fs);
            final String historyLogDir = JobHistory.getCompletedJobHistoryLocation().toString();
            infoServer.setAttribute("historyLogDir", historyLogDir);

            infoServer.setAttribute("serialNumberDirectoryDigits",
                    Integer.valueOf(JobHistory.serialNumberDirectoryDigits()));

            infoServer.setAttribute("serialNumberTotalDigits",
                    Integer.valueOf(JobHistory.serialNumberTotalDigits()));

            return new Path(historyLogDir).getFileSystem(conf);
        }
    });
    infoServer.setAttribute("fileSys", historyFS);
    infoServer.setAttribute("jobConf", conf);
    infoServer.setAttribute("aclManager", aclsManager);

    if (JobHistoryServer.isEmbedded(conf)) {
        LOG.info("History server being initialized in embedded mode");
        jobHistoryServer = new JobHistoryServer(conf, aclsManager, infoServer);
        jobHistoryServer.start();
        LOG.info("Job History Server web address: " + JobHistoryServer.getAddress(conf));
    }

    this.dnsToSwitchMapping = ReflectionUtils.newInstance(conf.getClass("topology.node.switch.mapping.impl",
            ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);
    this.numTaskCacheLevels = conf.getInt("mapred.task.cache.levels", NetworkTopology.DEFAULT_HOST_LEVEL);

    //initializes the job status store
    completedJobStatusStore = new CompletedJobStatusStore(conf, aclsManager);
}

From source file: org.apache.hadoop.hdfs.server.namenode.FSNamesystem.java

/**
 * Initialize FSNamesystem.
 */
private void initialize(NameNode nn, Configuration conf) throws IOException {
    this.systemStart = now();
    setConfigurationParameters(conf);
    dtSecretManager = createDelegationTokenSecretManager(conf);

    this.nameNodeAddress = nn.getNameNodeAddress();
    this.registerMBean(conf); // register the MBean for the FSNamesystem status
    this.dir = new FSDirectory(this, conf);
    StartupOption startOpt = NameNode.getStartupOption(conf);
    this.dir.loadFSImage(getNamespaceDirs(conf), getNamespaceEditsDirs(conf), startOpt);
    long timeTakenToLoadFSImage = now() - systemStart;
    LOG.info("Finished loading FSImage in " + timeTakenToLoadFSImage + " msecs");
    NameNode.getNameNodeMetrics().setFsImageLoadTime(timeTakenToLoadFSImage);
    this.safeMode = new SafeModeInfo(conf);
    setBlockTotal();
    pendingReplications = new PendingReplicationBlocks(
            conf.getInt("dfs.replication.pending.timeout.sec", -1) * 1000L);
    if (isAccessTokenEnabled) {
        accessTokenHandler = new BlockTokenSecretManager(true, accessKeyUpdateInterval, accessTokenLifetime);
    }
    this.hbthread = new Daemon(new HeartbeatMonitor());
    this.lmthread = new Daemon(leaseManager.new Monitor());
    this.replmon = new ReplicationMonitor();
    this.replthread = new Daemon(replmon);
    hbthread.start();
    lmthread.start();
    replthread.start();

    this.hostsReader = new HostsFileReader(conf.get("dfs.hosts", ""), conf.get("dfs.hosts.exclude", ""));
    this.dnthread = new Daemon(
            new DecommissionManager(this).new Monitor(conf.getInt("dfs.namenode.decommission.interval", 30),
                    conf.getInt("dfs.namenode.decommission.nodes.per.interval", 5)));
    dnthread.start();

    this.dnsToSwitchMapping = ReflectionUtils.newInstance(conf.getClass("topology.node.switch.mapping.impl",
            ScriptBasedMapping.class, DNSToSwitchMapping.class), conf);

    /* If the DNS-to-switch mapping supports caching, resolve the network
     * locations of the hosts in the include list and store the mapping
     * in the cache, so future calls to resolve will be fast.
     */
    if (dnsToSwitchMapping instanceof CachedDNSToSwitchMapping) {
        dnsToSwitchMapping.resolve(new ArrayList<String>(hostsReader.getHosts()));
    }

    InetSocketAddress socAddr = NameNode.getAddress(conf);
    this.nameNodeHostName = socAddr.getHostName();

    registerWith(DefaultMetricsSystem.INSTANCE);
}

From source file: org.apache.hadoop.gateway.GatewayBasicFuncTest.java

@Test
public void testBasicHdfsUseCase() throws IOException {
    String root = "/tmp/GatewayBasicFuncTest/testBasicHdfsUseCase";
    String username = "hdfs";
    String password = "hdfs-password";
    InetSocketAddress gatewayAddress = driver.gateway.getAddresses()[0];

    // Attempt to delete the test directory in case a previous run failed.
    // Ignore any result.
    // Cleanup anything that might have been leftover because the test failed previously.
    driver.getMock("WEBHDFS").expect().method("DELETE").from("testBasicHdfsUseCase-1").pathInfo("/v1" + root)
            .queryParam("op", "DELETE").queryParam("user.name", username).queryParam("recursive", "true")
            .respond().status(HttpStatus.SC_OK);
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "DELETE").queryParam("recursive", "true").expect().log().all()
            .statusCode(HttpStatus.SC_OK).when().delete(driver.getUrl("WEBHDFS") + "/v1" + root
                    + (driver.isUseGateway() ? "" : "?user.name=" + username));
    driver.assertComplete();

    /* Create a directory.
    curl -i -X PUT "http://<HOST>:<PORT>/<PATH>?op=MKDIRS[&permission=<OCTAL>]"
            
    The client receives a response with a boolean JSON object:
    HTTP/1.1 HttpStatus.SC_OK OK
    Content-Type: application/json
    Transfer-Encoding: chunked
            
    {"boolean": true}
    */
    driver.getMock("WEBHDFS").expect().method("PUT").pathInfo("/v1" + root + "/dir").queryParam("op", "MKDIRS")
            .queryParam("user.name", username).respond().status(HttpStatus.SC_OK)
            .content(driver.getResourceBytes("webhdfs-success.json")).contentType("application/json");
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "MKDIRS").expect()
            //.log().all();
            .statusCode(HttpStatus.SC_OK).contentType("application/json").content("boolean", is(true)).when()
            .put(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir");
    driver.assertComplete();

    driver.getMock("WEBHDFS").expect().method("GET").pathInfo("/v1" + root).queryParam("op", "LISTSTATUS")
            .queryParam("user.name", username).respond().status(HttpStatus.SC_OK)
            .content(driver.getResourceBytes("webhdfs-liststatus-test.json")).contentType("application/json");
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_OK).content("FileStatuses.FileStatus[0].pathSuffix", is("dir")).when()
            .get(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();

    //NEGATIVE: Test a bad password.
    given()
            //.log().all()
            .auth().preemptive().basic(username, "invalid-password").header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_UNAUTHORIZED).when().get(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();

    //NEGATIVE: Test a bad user.
    given()
            //.log().all()
            .auth().preemptive().basic("hdfs-user", "hdfs-password").header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_UNAUTHORIZED).when().get(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();

    //NEGATIVE: Test a valid but unauthorized user.
    given()
            //.log().all()
            .auth().preemptive().basic("mapred-user", "mapred-password").header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "LISTSTATUS").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_UNAUTHORIZED).when().get(driver.getUrl("WEBHDFS") + "/v1" + root);

    /* Add a file.
    curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=CREATE
               [&overwrite=<true|false>][&blocksize=<LONG>][&replication=<SHORT>]
             [&permission=<OCTAL>][&buffersize=<INT>]"
            
    The request is redirected to a datanode where the file data is to be written:
    HTTP/1.1 307 TEMPORARY_REDIRECT
    Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE...
    Content-Length: 0
            
    Step 2: Submit another HTTP PUT request using the URL in the Location header with the file data to be written.
    curl -i -X PUT -T <LOCAL_FILE> "http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=CREATE..."
            
    The client receives a HttpStatus.SC_CREATED Created response with zero content length and the WebHDFS URI of the file in the Location header:
    HTTP/1.1 HttpStatus.SC_CREATED Created
    Location: webhdfs://<HOST>:<PORT>/<PATH>
    Content-Length: 0
    */
    driver.getMock("WEBHDFS").expect().method("PUT").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "CREATE").queryParam("user.name", username).respond()
            .status(HttpStatus.SC_TEMPORARY_REDIRECT).header("Location",
                    driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file?op=CREATE&user.name=hdfs");
    driver.getMock("DATANODE").expect().method("PUT").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "CREATE").queryParam("user.name", username).contentType("text/plain")
            .content(driver.getResourceBytes("test.txt"))
            //.content( driver.gerResourceBytes( "hadoop-examples.jar" ) )
            .respond().status(HttpStatus.SC_CREATED)
            .header("Location", "webhdfs://" + driver.getRealAddr("DATANODE") + "/v1" + root + "/dir/file");
    Response response = given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "CREATE").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_TEMPORARY_REDIRECT).when()
            .put(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file");
    String location = response.getHeader("Location");
    log.debug("Redirect location: " + response.getHeader("Location"));
    if (driver.isUseGateway()) {
        MatcherAssert.assertThat(location,
                startsWith("http://" + gatewayAddress.getHostName() + ":" + gatewayAddress.getPort() + "/"));
        MatcherAssert.assertThat(location, containsString("?_="));
    }
    MatcherAssert.assertThat(location, not(containsString("host=")));
    MatcherAssert.assertThat(location, not(containsString("port=")));
    response = given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .content(driver.getResourceBytes("test.txt")).contentType("text/plain").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_CREATED).when().put(location);
    location = response.getHeader("Location");
    log.debug("Created location: " + location);
    if (driver.isUseGateway()) {
        MatcherAssert.assertThat(location,
                startsWith("http://" + gatewayAddress.getHostName() + ":" + gatewayAddress.getPort() + "/"));
    }
    driver.assertComplete();

    /* Get the file.
    curl -i -L "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=OPEN
               [&offset=<LONG>][&length=<LONG>][&buffersize=<INT>]"
            
    The request is redirected to a datanode where the file data can be read:
    HTTP/1.1 307 TEMPORARY_REDIRECT
    Location: http://<DATANODE>:<PORT>/webhdfs/v1/<PATH>?op=OPEN...
    Content-Length: 0
            
    The client follows the redirect to the datanode and receives the file data:
    HTTP/1.1 HttpStatus.SC_OK OK
    Content-Type: application/octet-stream
    Content-Length: 22
            
    Hello, webhdfs user!
    */
    driver.getMock("WEBHDFS").expect().method("GET").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "OPEN").queryParam("user.name", username).respond()
            .status(HttpStatus.SC_TEMPORARY_REDIRECT).header("Location",
                    driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file?op=OPEN&user.name=hdfs");
    driver.getMock("DATANODE").expect().method("GET").pathInfo("/v1" + root + "/dir/file")
            .queryParam("op", "OPEN").queryParam("user.name", username).respond().status(HttpStatus.SC_OK)
            .contentType("text/plain").content(driver.getResourceBytes("test.txt"));
    given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "OPEN").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_OK).content(is("TEST")).when()
            .get(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file");
    driver.assertComplete();

    /* Delete the directory.
    curl -i -X DELETE "http://<host>:<port>/webhdfs/v1/<path>?op=DELETE
                         [&recursive=<true|false>]"
            
    The client receives a response with a boolean JSON object:
    HTTP/1.1 HttpStatus.SC_OK OK
    Content-Type: application/json
    Transfer-Encoding: chunked
            
    {"boolean": true}
    */
    // Mock the interaction with the namenode.
    driver.getMock("WEBHDFS").expect().from("testBasicHdfsUseCase-1").method("DELETE").pathInfo("/v1" + root)
            .queryParam("op", "DELETE").queryParam("user.name", username).queryParam("recursive", "true")
            .respond().status(HttpStatus.SC_OK);
    given().auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "DELETE").queryParam("recursive", "true").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_OK).when().delete(driver.getUrl("WEBHDFS") + "/v1" + root);
    driver.assertComplete();
}