Example usage for org.apache.hadoop.net DNS getDefaultHost

List of usage examples for org.apache.hadoop.net DNS getDefaultHost

Introduction

On this page you can find example usage for org.apache.hadoop.net DNS getDefaultHost.

Prototype

public static String getDefaultHost(@Nullable String strInterface, @Nullable String nameserver)
        throws UnknownHostException 

Document

Returns the default (first) host name associated by the provided nameserver with the address bound to the specified network interface.
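
A minimal, self-contained sketch of a direct call is shown below; passing "default" for both parameters lets Hadoop pick the default network interface and the system-configured nameserver. The class name here is illustrative, not part of Hadoop.

import java.net.UnknownHostException;

import org.apache.hadoop.net.DNS;

public class DefaultHostExample {
    public static void main(String[] args) throws UnknownHostException {
        // "default" selects the default network interface and the system nameserver.
        String hostname = DNS.getDefaultHost("default", "default");
        System.out.println("Default host: " + hostname);
    }
}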

Usage

From source file:com.alibaba.wasp.fserver.FServer.java

License:Apache License

/**
 * Starts a FServer at the default location
 *
 * @param conf
 * @throws java.io.IOException
 * @throws InterruptedException
 */
public FServer(Configuration conf) throws IOException, InterruptedException {
    this.conf = conf;
    this.isOnline = false;
    // Set how many times to retry talking to another server over FConnection.
    FConnectionManager.setServerSideFConnectionRetries(this.conf, LOG);

    // Config'ed params
    this.msgInterval = conf.getInt("wasp.fserver.msginterval", 3 * 1000);

    this.sleeper = new Sleeper(this.msgInterval, this);

    this.numEntityGroupsToReport = conf.getInt("wasp.fserver.numentitygroupstoreport", 10);

    this.rpcTimeout = conf.getInt(FConstants.WASP_RPC_TIMEOUT_KEY, FConstants.DEFAULT_WASP_RPC_TIMEOUT);

    this.abortRequested = false;
    this.stopped = false;
    this.actionManager = new StorageActionManager(conf);

    // Server to handle client requests.
    String hostname = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.fserver.dns.interface", "default"),
                    conf.get("wasp.fserver.dns.nameserver", "default")));
    int port = conf.getInt(FConstants.FSERVER_PORT, FConstants.DEFAULT_FSERVER_PORT);
    // Creation of a HSA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }

    this.rpcServer = WaspRPC.getServer(FServer.class, this,
            new Class<?>[] { ClientProtocol.class, AdminProtocol.class, WaspRPCErrorHandler.class,
                    OnlineEntityGroups.class },
            initialIsa.getHostName(), // BindAddress is
            // IP we got for
            // this server.
            initialIsa.getPort(), conf);
    // Set our address.
    this.isa = this.rpcServer.getListenerAddress();

    this.leases = new Leases(conf.getInt(FConstants.THREAD_WAKE_FREQUENCY, 10 * 1000));

    this.startcode = System.currentTimeMillis();

    int maxThreads = conf.getInt("wasp.transaction.threads.max", 150);

    this.pool = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
            new DaemonThreadFactory("thread factory"));
    ((ThreadPoolExecutor) this.pool).allowCoreThreadTimeOut(true);

    this.scannerLeaseTimeoutPeriod = conf.getInt(FConstants.WASP_CLIENT_SCANNER_TIMEOUT_PERIOD,
            FConstants.DEFAULT_WASP_CLIENT_SCANNER_TIMEOUT_PERIOD);

    this.driver = new BaseDriver(this);
    this.splitThread = new SplitThread(this);
    this.globalEntityGroup = new GlobalEntityGroup(this);
}
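
The examples on this page share a common pattern: the interface and nameserver are read from project-specific configuration keys (falling back to "default") and the resolved name is used as the server's bind hostname. A minimal sketch of that pattern follows; the configuration key names are placeholders, not taken from any of the projects shown here.

import java.net.UnknownHostException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.DNS;

public class ConfiguredHostnameSketch {
    public static void main(String[] args) throws UnknownHostException {
        Configuration conf = new Configuration();
        // Placeholder keys; each project defines its own, e.g.
        // "wasp.fserver.dns.interface" or "hbase.regionserver.dns.nameserver".
        String iface = conf.get("example.dns.interface", "default");
        String nameserver = conf.get("example.dns.nameserver", "default");
        String hostname = DNS.getDefaultHost(iface, nameserver);
        System.out.println("Resolved hostname: " + hostname);
    }
}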

From source file:com.alibaba.wasp.master.FMaster.java

License:Apache License

/**
 * Initializes the FMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize FMaster RPC and address
 * <li>Connect to ZooKeeper.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in {@link #run()} so that they run
 * in their own thread rather than within the context of the constructor.
 * 
 * @throws InterruptedException
 */
public FMaster(final Configuration conf) throws IOException, KeeperException, InterruptedException {
    this.conf = new Configuration(conf);
    // Set how many times to retry talking to another server over HConnection.
    FConnectionManager.setServerSideFConnectionRetries(this.conf, LOG);
    // Server to handle client requests.
    String hostname = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.master.dns.interface", "default"),
                    conf.get("wasp.master.dns.nameserver", "default")));
    int port = conf.getInt(FConstants.MASTER_PORT, FConstants.DEFAULT_MASTER_PORT);
    // Creation of a ISA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    this.rpcServer = WaspRPC.getServer(FMaster.class, this,
            new Class<?>[] { FMasterMonitorProtocol.class, FMasterAdminProtocol.class,
                    FServerStatusProtocol.class, FMetaServerProtocol.class },
            initialIsa.getHostName(), // BindAddress is IP we got for this server.
            initialIsa.getPort(), conf);
    // Set our address.
    this.isa = this.rpcServer.getListenerAddress();
    this.serverName = new ServerName(this.isa.getHostName(), this.isa.getPort(), System.currentTimeMillis());

    // set the thread name now we have an address
    setName(MASTER + "-" + this.serverName.toString());

    this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true);

    // metrics interval: using the same property as fserver.
    this.msgInterval = conf.getInt("wasp.fserver.msginterval", 3 * 1000);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
}

From source file:com.alibaba.wasp.zookeeper.FQuorumPeer.java

License:Apache License

static void writeMyID(Configuration conf, Properties properties) throws IOException {
    long myId = -1;
    String myAddress = Strings
            .domainNamePointerToHostName(DNS.getDefaultHost(conf.get("wasp.zookeeper.dns.interface", "default"),
                    conf.get("wasp.zookeeper.dns.nameserver", "default")));

    List<String> ips = new ArrayList<String>();

    // Add what could be the best (configured) match
    ips.add(myAddress.contains(".") ? myAddress : StringUtils.simpleHostname(myAddress));

    // For all nics get all hostnames and IPs
    Enumeration<?> nics = NetworkInterface.getNetworkInterfaces();
    while (nics.hasMoreElements()) {
        Enumeration<?> rawAdrs = ((NetworkInterface) nics.nextElement()).getInetAddresses();
        while (rawAdrs.hasMoreElements()) {
            InetAddress inet = (InetAddress) rawAdrs.nextElement();
            ips.add(StringUtils.simpleHostname(inet.getHostName()));
            ips.add(inet.getHostAddress());
        }
    }

    for (Entry<Object, Object> entry : properties.entrySet()) {
        String key = entry.getKey().toString().trim();
        String value = entry.getValue().toString().trim();
        if (key.startsWith("server.")) {
            int dot = key.indexOf('.');
            long id = Long.parseLong(key.substring(dot + 1));
            String[] parts = value.split(":");
            String address = parts[0];
            if (addressIsLocalHost(address) || ips.contains(address)) {
                myId = id;
                break;
            }
        }
    }

    // Set the max session timeout from the provided client-side timeout
    properties.setProperty("maxSessionTimeout", conf.get("zookeeper.session.timeout", "180000"));

    if (myId == -1) {
        throw new IOException(
                "Could not find my address: " + myAddress + " in list of ZooKeeper quorum servers");
    }

    String dataDirStr = properties.get("dataDir").toString().trim();
    File dataDir = new File(dataDirStr);
    if (!dataDir.isDirectory()) {
        if (!dataDir.mkdirs()) {
            throw new IOException("Unable to create data dir " + dataDir);
        }
    }

    File myIdFile = new File(dataDir, "myid");
    PrintWriter w = new PrintWriter(myIdFile);
    w.println(myId);
    w.close();
}

From source file:com.chinamobile.bcbsp.workermanager.WorkerManager.java

License:Apache License

/**
 * Initialize workerManager.
 */
@SuppressWarnings("static-access")
public synchronized void initialize() throws IOException {
    if (this.conf.get(Constants.BC_BSP_WORKERMANAGER_RPC_HOST) != null) {
        this.workerManagerName = conf.get(Constants.BC_BSP_WORKERMANAGER_RPC_HOST);
    }
    if (this.workerManagerName == null) {
        this.workerManagerName = DNS.getDefaultHost(conf.get("bsp.dns.interface", "default"),
                conf.get("bsp.dns.nameserver", "default"));
    }
    // check local disk
    checkLocalDirs(conf.getStrings(Constants.BC_BSP_LOCAL_DIRECTORY));
    deleteLocalFiles("workerManager");
    this.workerFaultList = new ArrayList<Fault>();
    this.reportStaffStatusList = new ArrayList<StaffStatus>();
    this.runningStaffs = new ConcurrentHashMap<StaffAttemptID, StaffInProgress>();
    this.finishedStaffs = new ConcurrentHashMap<StaffAttemptID, StaffInProgress>();
    this.runningJobs = new ConcurrentHashMap<BSPJobID, RunningJob>();
    this.finishedJobs = new ConcurrentHashMap<BSPJobID, RunningJob>();
    this.runningJobtoWorkerAgent = new ConcurrentHashMap<BSPJobID, WorkerAgentForJob>();
    this.reprotStaffsMap = new ConcurrentHashMap<StaffAttemptID, StaffInProgress>();
    this.conf.set(Constants.BC_BSP_WORKERAGENT_HOST, this.workerManagerName);
    this.conf.set(Constants.BC_BSP_WORKERMANAGER_RPC_HOST, this.workerManagerName);
    this.maxStaffsCount = conf.getInt(Constants.BC_BSP_WORKERMANAGER_MAXSTAFFS, 1);
    WorkerManager.HEART_BEAT_INTERVAL = conf.getInt(Constants.HEART_BEAT_INTERVAL, 1000);
    LOG.info("The max number of staffs is : " + this.maxStaffsCount);
    int rpcPort = -1;
    String rpcAddr = null;
    if (!this.initialized) {
        rpcAddr = conf.get(Constants.BC_BSP_WORKERMANAGER_RPC_HOST,
                Constants.DEFAULT_BC_BSP_WORKERMANAGER_RPC_HOST);
        rpcPort = conf.getInt(Constants.BC_BSP_WORKERMANAGER_RPC_PORT, 5000);
        if (-1 == rpcPort || null == rpcAddr) {
            throw new IllegalArgumentException("Error rpc address " + rpcAddr + " port" + rpcPort);
        }
        this.workerServer = RPC.getServer(this, rpcAddr, rpcPort, conf);
        this.workerServer.start();
        this.rpcServer = rpcAddr + ":" + rpcPort;
        LOG.info("Worker rpc server --> " + rpcServer);
    }
    String address = conf.get(Constants.BC_BSP_WORKERMANAGER_REPORT_ADDRESS);
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    String bindAddress = socAddr.getHostName();
    int tmpPort = socAddr.getPort();
    // RPC initialization
    this.staffReportServer = RPC.getServer(this, bindAddress, tmpPort, 10, false, this.conf);
    this.staffReportServer.start();
    // http server
    InetAddress addr = InetAddress.getLocalHost();
    String ipSlave = addr.getHostAddress().toString();
    winfoPort = conf.getInt("bcbsp.http.winfoserver.port", 40027);
    winfoServer = new HttpServer("bcbsp", ipSlave, winfoPort, true, conf);
    winfoServer.setAttribute("WorkerManager", this);
    LOG.info("prot: 40027");
    LOG.info("ljn test : controllerClient before start ");
    winfoServer.start();
    LOG.info("server has started");
    LOG.info("ljn test : controllerClient before register ");
    // get the assigned address
    this.staffReportAddress = staffReportServer.getListenerAddress();
    LOG.info("WorkerManager up at: " + this.staffReportAddress);
    DistributedCache.purgeCache(this.conf);
    LOG.info("ljn test : DistributedCache ");
    LOG.info("ljn test : bspControllerAddr " + bspControllerAddr);
    LOG.info("ljn test : BSPRPCProtocolVersion.versionID " + BSPRPCProtocolVersion.versionID);
    // establish the communication link to bsp master
    try {
        this.controllerClient = (ControllerProtocol) RPC.waitForProxy(ControllerProtocol.class,
                BSPRPCProtocolVersion.versionID, bspControllerAddr, conf);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    LOG.info("ljn test : controllerClient controllerClient start ");
    // establish the communication link to standby bsp master
    if ("ha".equals(conf.get(Constants.BC_BSP_HA_FLAG, ""))) {
        this.standbyControllerClient = (ControllerProtocol) RPC.waitForProxy(ControllerProtocol.class,
                BSPRPCProtocolVersion.versionID, this.standbyControllerAddr, conf);
    }
    LOG.info("bspControllerAddr = " + bspControllerAddr + " standbyControllerAddr = " + standbyControllerAddr);
    LOG.info("ljn test : controllerClient before register ");
    // enroll in bsp controller
    if (-1 == rpcPort || null == rpcAddr) {
        throw new IllegalArgumentException("Error rpc address " + rpcAddr + " port" + rpcPort);
    }
    LOG.info("ljn test : controllerClient before lsManager.start ");
    this.lsManager.start();
    LOG.info("ljn test : controllerClient before lsManager.start  over");
    workerMangerStatus = new WorkerManagerStatus(workerManagerName, cloneAndResetRunningStaffStatuses(),
            maxStaffsCount, currentStaffsCount, finishedStaffsCount, failures, this.rpcServer, workerFaultList);
    this.workerMangerStatus.setHost(bindAddress);
    this.workerMangerStatus.setHttpPort(this.staffReportAddress.toString());
    this.workerMangerStatus.setLocalIp(ipSlave);
    LOG.info("ljn test : controllerClient before register ");
    if (!this.controllerClient.register(workerMangerStatus)) {
        LOG.error("There is a problem in establishing communication" + " link with BSPController");
        throw new IOException("There is a problem in establishing" + " communication link with BSPController.");
    } else {
        LOG.info("have registed to bsp master");
    }
    if ("ha".equals(conf.get(Constants.BC_BSP_HA_FLAG, ""))) {
        if (!this.standbyControllerClient.register(workerMangerStatus)) {
            LOG.error("There is a problem in establishing communication" + " link with BSPController");
            throw new IOException(
                    "There is a problem in establishing" + " communication link with BSPController.");
        } else {
            LOG.info("have registed to standby bsp master");
        }
    }
    this.running = true;
    this.initialized = true;
    LOG.info("ljn test : controllerClient after register ");
}

From source file:com.hortonworks.hbase.replication.bridge.ReplicationBridgeServer.java

License:Apache License

/**
 * Starts a HRegionServer at the default location
 *
 * @param conf
 * @throws IOException
 * @throws InterruptedException
 * @throws KeeperException 
 * @throws ZkConnectException 
 */
public ReplicationBridgeServer(Configuration conf) throws IOException, InterruptedException, KeeperException {
    this.conf = conf;

    // Set how many times to retry talking to another server over HConnection.
    HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);

    // Server to handle client requests.
    String hostname = conf.get("hbase.regionserver.ipc.address",
            Strings.domainNamePointerToHostName(
                    DNS.getDefaultHost(conf.get("hbase.regionserver.dns.interface", "default"),
                            conf.get("hbase.regionserver.dns.nameserver", "default"))));
    port = conf.getInt("hbase.bridge.server.port", BRIDGE_SERVER_PORT);
    // Creation of a HSA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }

    this.rpcServer = HBaseRPC.getServer(this, new Class<?>[] { HRegionInterface.class },
            initialIsa.getHostName(), // BindAddress is IP we got for this server.
            initialIsa.getPort(), conf.getInt("hbase.regionserver.handler.count", 10),
            conf.getInt("hbase.regionserver.metahandler.count", 10),
            conf.getBoolean("hbase.rpc.verbose", false), conf, HConstants.QOS_THRESHOLD);
}

From source file:com.ngdata.hbaseindexer.Main.java

License:Apache License

/**
 * @param conf the configuration object containing the hbase-indexer configuration, as well
 *             as the hbase/hadoop settings. Typically created using {@link HBaseIndexerConfiguration}.
 */
public void startServices(Configuration conf) throws Exception {
    String hostname = Strings.domainNamePointerToHostName(
            DNS.getDefaultHost(conf.get("hbase.regionserver.dns.interface", "default"),
                    conf.get("hbase.regionserver.dns.nameserver", "default")));

    log.debug("Using hostname " + hostname);

    String zkConnectString = conf.get(ConfKeys.ZK_CONNECT_STRING);
    int zkSessionTimeout = conf.getInt(ConfKeys.ZK_SESSION_TIMEOUT, 30000);
    zk = new StateWatchingZooKeeper(zkConnectString, zkSessionTimeout);

    tablePool = new HTablePool(conf, 10 /* TODO configurable */);

    String zkRoot = conf.get(ConfKeys.ZK_ROOT_NODE);

    indexerModel = new IndexerModelImpl(zk, zkRoot);

    sepModel = new SepModelImpl(zk, conf);

    indexerMaster = new IndexerMaster(zk, indexerModel, conf, conf, zkConnectString, sepModel);
    indexerMaster.start();

    IndexerRegistry indexerRegistry = new IndexerRegistry();
    IndexerProcessRegistry indexerProcessRegistry = new IndexerProcessRegistryImpl(zk, conf);
    indexerSupervisor = new IndexerSupervisor(indexerModel, zk, hostname, indexerRegistry,
            indexerProcessRegistry, tablePool, conf);

    indexerSupervisor.init();
    startHttpServer();

}

From source file:common.DataNode.java

License:Apache License

/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, DatanodeProtocol namenode)
        throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
        machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    this.nameNodeAddr = NameNode.getAddress(conf);

    this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
    InetSocketAddress socAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.address", "0.0.0.0:50010"));
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = namenode;

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;

    //initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verifcation is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf);
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute("datanode.conf", conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);

    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
        try {
            p.start(this);
            LOG.info("Started plug-in " + p);
        } catch (Throwable t) {
            LOG.warn("ServicePlugin " + p + " could not be started", t);
        }
    }
}

From source file:io.hops.experiments.benchmarks.blockreporting.TinyDatanode.java

License:Apache License

void register(boolean isDataNodePopulated) throws Exception {
    List<BlockReportingNameNodeHandle> namenodes = nameNodeSelector.getNameNodes();
    // get versions from the namenode
    nsInfo = namenodes.get(0).getDataNodeRPC().versionRequest();
    dnRegistration = new DatanodeRegistration(
            new DatanodeID(DNS.getDefaultIP("default"), DNS.getDefaultHost("default", "default"), "",
                    getNodePort(dnIdx), DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
                    DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
            new DataStorage(nsInfo, ""), new ExportedBlockKeys(), VersionInfo.getVersion());
    dnRegistration.setStorageID(createNewStorageId(dnRegistration.getXferPort(), dnIdx));
    // register datanode
    for (BlockReportingNameNodeHandle nn : namenodes) {
        dnRegistration = nn.getDataNodeRPC().registerDatanode(dnRegistration);
    }
    //first block reports
    storage = new DatanodeStorage(dnRegistration.getStorageID());
    if (!isDataNodePopulated) {
        firstBlockReport(new BlockListAsLongs(null, null).getBlockListAsLongs());
    }
}

From source file:net.arp7.TestHadoopDNS.GetReverseHostName.java

License:Apache License

public static void main(String[] argv) throws UnknownHostException {
    if (argv.length != 1) {
        System.err.println("  Usage: GetReverseHostName <interface-name>");
        System.exit(1);
    }

    final String hostname = DNS.getDefaultHost(argv[0], "default");
    // final String hostname = DNS.getDefaultHost(argv[0], "default", true);  -- Enable for 2.8.0 or later.
    System.out.println(" >> Mapped " + argv[0] + " to hostname " + hostname);
}

From source file:org.apache.giraph.conf.GiraphConfiguration.java

License:Apache License

/**
 * Get the local hostname on the given interface.
 *
 * @return The local hostname
 * @throws UnknownHostException
 */
public String getLocalHostname() throws UnknownHostException {
    return DNS.getDefaultHost(GiraphConstants.DNS_INTERFACE.get(this), GiraphConstants.DNS_NAMESERVER.get(this))
            .toLowerCase();
}