List of usage examples for org.apache.hadoop.hdfs.server.namenode.NameNode#getHttpAddress()
public InetSocketAddress getHttpAddress()
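Before the full examples, a minimal sketch of the call itself. The running MiniDFSCluster referenced as "cluster" is assumed here purely for illustration; it is not part of either source file below.

// Minimal sketch: getHttpAddress() returns the InetSocketAddress the
// NameNode's HTTP server is actually bound to, which is handy for building
// web UI / WebHDFS URLs in tests. "cluster" (a MiniDFSCluster) is assumed.
NameNode nn = cluster.getNameNode();
InetSocketAddress httpAddr = nn.getHttpAddress();
if (httpAddr != null) {
    // NetUtils.getHostPortString() renders the address as "host:port".
    String webhdfsBase = "http://" + NetUtils.getHostPortString(httpAddr) + "/webhdfs/v1";
    System.out.println("NameNode HTTP endpoint: " + webhdfsBase);
}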
From source file:com.mellanox.r4h.MiniDFSCluster.java
License:Apache License
private void createNameNode(int nnIndex, Configuration conf, int numDataNodes, boolean format,
        StartupOption operation, String clusterId, String nameserviceId, String nnId) throws IOException {
    // Format and clean out DataNode directories
    if (format) {
        DFSTestUtil.formatNameNode(conf);
    }
    if (operation == StartupOption.UPGRADE) {
        operation.setClusterId(clusterId);
    }

    // Start the NameNode after saving the default file system.
    String originalDefaultFs = conf.get(FS_DEFAULT_NAME_KEY);
    String[] args = createArgs(operation);
    NameNode nn = NameNode.createNameNode(args, conf);
    if (operation == StartupOption.RECOVER) {
        return;
    }

    // After the NN has started, set back the bound ports into
    // the conf
    conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, nameserviceId, nnId),
            nn.getNameNodeAddressHostPortString());
    if (nn.getHttpAddress() != null) {
        conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTP_ADDRESS_KEY, nameserviceId, nnId),
                NetUtils.getHostPortString(nn.getHttpAddress()));
    }
    if (nn.getHttpsAddress() != null) {
        conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_HTTPS_ADDRESS_KEY, nameserviceId, nnId),
                NetUtils.getHostPortString(nn.getHttpsAddress()));
    }
    DFSUtil.setGenericConf(conf, nameserviceId, nnId, DFS_NAMENODE_HTTP_ADDRESS_KEY);
    nameNodes[nnIndex] = new NameNodeInfo(nn, nameserviceId, nnId, operation, new Configuration(conf));

    // Restore the default fs name
    if (originalDefaultFs == null) {
        conf.set(FS_DEFAULT_NAME_KEY, "");
    } else {
        conf.set(FS_DEFAULT_NAME_KEY, originalDefaultFs);
    }
}
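In this MiniDFSCluster helper, getHttpAddress() (and getHttpsAddress()) report the addresses the NameNode actually bound to after startup, so they are written back into the test configuration under DFS_NAMENODE_HTTP_ADDRESS_KEY and DFS_NAMENODE_HTTPS_ADDRESS_KEY; the real bound ports are only known once the NameNode is running.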
From source file:com.wandisco.s3hdfs.rewrite.filter.TestMetadata.java
License:Apache License
@Test @SuppressWarnings("deprecation") public void testBasicMetadataRead() throws IOException, URISyntaxException, ServiceException, NoSuchAlgorithmException { NameNode nn = cluster.getNameNode(); System.out.println(nn.getHttpAddress().toString()); S3HdfsPath s3HdfsPath = testUtil.setUpS3HdfsPath("myBucket", "bigFile"); // Put new object byte[] data = new byte[SMALL_SIZE]; for (int i = 0; i < SMALL_SIZE; i++) { data[i] = (byte) (i % 256); }/*from w w w . j a v a 2 s .com*/ S3Object object = new S3Object(s3HdfsPath.getObjectName(), data); Map<String, Object> metaEntries = new HashMap<String, Object>(); metaEntries.put("scared", "yes"); metaEntries.put("tired", "yes"); metaEntries.put("hopeless", "never"); object.addAllMetadata(metaEntries); object.setMetadataComplete(true); s3Service.putObject(s3HdfsPath.getBucketName(), object); HttpClient httpClient = new HttpClient(); // Set up HttpGet and get response FileStatus fs = hdfs.getFileStatus(new Path(s3HdfsPath.getFullHdfsMetaPath())); assertTrue(fs.isFile()); assertTrue(fs.getPath().getName().equals(META_FILE_NAME)); String url = "http://" + hostName + ":" + PROXY_PORT + "/webhdfs/v1/s3hdfs/" + s3HdfsPath.getUserName() + "/myBucket/bigFile/" + DEFAULT_VERSION + "/" + META_FILE_NAME + "?op=OPEN"; GetMethod httpGet = new GetMethod(url); httpClient.executeMethod(httpGet); InputStream is = httpGet.getResponseBodyAsStream(); Properties retVal = testUtil.parseMap(is); System.out.println(retVal); // consume response and re-allocate connection httpGet.releaseConnection(); assert httpGet.getStatusCode() == 200; assert retVal.getProperty("x-amz-meta-scared").equals("yes"); assert retVal.getProperty("x-amz-meta-tired").equals("yes"); assert retVal.getProperty("x-amz-meta-hopeless").equals("never"); }