Example usage for java.net ServerSocket getInetAddress

Introduction

On this page you can find example usage of java.net.ServerSocket.getInetAddress().

Prototype

public InetAddress getInetAddress() 

Document

Returns the local address of this server socket.
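
As a quick orientation: the method returns the address the socket is bound to, or null if it has not been bound yet. A minimal sketch (the class name and loopback binding are chosen only for illustration):

import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class GetInetAddressDemo {
    public static void main(String[] args) throws Exception {
        try (ServerSocket server = new ServerSocket()) {
            System.out.println(server.getInetAddress()); // null: not bound yet
            server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0)); // loopback, ephemeral port
            System.out.println(server.getInetAddress()); // e.g. localhost/127.0.0.1
            System.out.println(server.getLocalPort());   // the port the OS picked
        }
    }
}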

Usage

From source file:Main.java

/**
 * Renders the details of a server socket in the returned string
 * @param socket The server socket to render
 * @return the details of the server socket as a string
 */
public static String render(ServerSocket socket) {
    if (socket == null)
        return "NULL";
    StringBuilder b = new StringBuilder("\nSocket [");
    b.append("\n\tLocalPort:").append(socket.getLocalPort());
    b.append("\n\tLocalAddress:").append(socket.getInetAddress());
    b.append("\n\tLocalSocketAddress:").append(socket.getLocalSocketAddress());
    b.append("\n\tChannel:").append(socket.getChannel());
    b.append("\n\tHashCode:").append(socket.hashCode());
    b.append("\n]");
    return b.toString();
}
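
To see the helper in action, it can be pointed at any bound socket; a minimal, hypothetical caller from the same class (the ephemeral-port binding is purely illustrative) might look like:

try (ServerSocket ss = new ServerSocket(0)) { // ephemeral port on the wildcard address
    System.out.println(render(ss));           // prints the port, local address, channel and hash code
}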

From source file:gov.hhs.fha.nhinc.lift.proxy.client.ClientConnectorManager.java

/**
 * This method will create a tunnel of a type defined by the properties
 * facade and will then bind a local temporary port for a client app to use
 * to communicate through the proxy tunnel.
 *
 * @param token
 * @param serverProxyAddress
 * @param serverProxyPort
 * @return an address to the local server a client can talk to
 * @throws IOException
 */
public InetSocketAddress startConnector(RequestToken token, InetAddress serverProxyAddress, int serverProxyPort,
        int bufferSize, ConsumerProxyPropertiesFacade props, SocketClientManagerController controller)
        throws IOException {
    /*
     * Attempts to start up a connection with the desired server proxy.
     */

    // Note that both client and server are closed when the thread completes
    log.debug("Creating Client instance to connect to server proxy: " + serverProxyAddress + ":"
            + serverProxyPort);
    Client client = props.getClientInstance(serverProxyAddress, serverProxyPort, token);

    /*
     * Start up a socket server bound to the local proxy hostname and to a
     * port unique to this request.
     */
    InetAddress localProxyAddress = props.getClientProxyAddress();
    log.debug("Local client proxy address set as: " + localProxyAddress);

    InetSocketAddress connectorAddress = new InetSocketAddress(localProxyAddress, 0);
    log.debug("Starting server socket for client to access on port: " + connectorAddress.getPort());

    // Note that both client and server are closed when the thread completes
    ServerSocket server = new ServerSocket();
    server.bind(connectorAddress);
    log.debug("Creating Server bound: " + server.getInetAddress() + ": " + server.getLocalPort());

    ClientConnector connector = new ClientConnector(server, client, bufferSize, controller);
    Thread conn = new Thread(connector);

    log.debug("Starting new Client Connector thread.");
    conn.start();

    return new InetSocketAddress(server.getInetAddress(), server.getLocalPort());
}
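
The idiom worth noting here is binding to port 0 so the operating system picks a free port, then reconstructing the effective endpoint from getInetAddress() and getLocalPort() to hand back to the caller. A stripped-down sketch of just that step (class and method names are illustrative, not part of the original):

import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class EphemeralBind {
    // Bind to an OS-assigned port and report the endpoint that was actually bound.
    static InetSocketAddress bindEphemeral(InetAddress localAddress) throws IOException {
        ServerSocket server = new ServerSocket();
        server.bind(new InetSocketAddress(localAddress, 0)); // port 0: let the OS choose
        return new InetSocketAddress(server.getInetAddress(), server.getLocalPort());
    }
}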

From source file:common.DataNode.java

/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, DatanodeProtocol namenode)
        throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
        machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    this.nameNodeAddr = NameNode.getAddress(conf);

    this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
    InetSocketAddress socAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.address", "0.0.0.0:50010"));
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = namenode;

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;

    //initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verifcation is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf);
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute("datanode.conf", conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);

    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
        try {
            p.start(this);
            LOG.info("Started plug-in " + p);
        } catch (Throwable t) {
            LOG.warn("ServicePlugin " + p + " could not be started", t);
        }
    }
}
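
One caveat with this pattern: dfs.datanode.address defaults to 0.0.0.0, and when a ServerSocket is bound to the wildcard address, getInetAddress() returns that wildcard rather than a concrete interface address, so selfAddr carries 0.0.0.0 plus the real port. A small standalone sketch of that behavior (not DataNode code):

import java.net.InetSocketAddress;
import java.net.ServerSocket;

public class WildcardBindDemo {
    public static void main(String[] args) throws Exception {
        try (ServerSocket ss = new ServerSocket()) {
            ss.bind(new InetSocketAddress(0)); // wildcard address, ephemeral port
            // Prints 0.0.0.0 (or the IPv6 wildcard), not a specific interface address.
            System.out.println(ss.getInetAddress().getHostAddress());
            System.out.println(ss.getLocalPort()); // the concrete port that was assigned
        }
    }
}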

From source file:org.apache.hadoop.dfs.DataNode.java

/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs) throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getAddress(conf);

    this.estimateBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
    this.socketTimeout = conf.getInt("dfs.socket.timeout", FSConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", FSConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);
    String address = NetUtils.getServerAddress(conf, "dfs.datanode.bindAddress", "dfs.datanode.port",
            "dfs.datanode.address");
    InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);
    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils
                    .newInstance(Class.forName("org.apache.hadoop.dfs.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    ss.setSoTimeout(conf.getInt("dfs.dataXceiver.timeoutInMS", 30000)); //30s
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.maxXceiverCount = conf.getInt("dfs.datanode.max.xcievers", MAX_XCEIVER_COUNT);
    this.threadGroup = new ThreadGroup("dataXceiveServer");
    this.dataXceiveServer = new Daemon(threadGroup, new DataXceiveServer(ss));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    this.balancingThrottler = new BlockBalanceThrottler(
            conf.getLong("dfs.balance.bandwidthPerSec", 1024L * 1024));

    //initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verifcation is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    String infoAddr = NetUtils.getServerAddress(conf, "dfs.datanode.info.bindAddress", "dfs.datanode.info.port",
            "dfs.datanode.http.address");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new StatusHttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0);
    InetSocketAddress secInfoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
    Configuration sslConf = new Configuration(conf);
    sslConf.addResource(conf.get("https.keystore.info.rsrc", "sslinfo.xml"));
    String keyloc = sslConf.get("https.keystore.location");
    if (null != keyloc) {
        this.infoServer.addSslListener(secInfoSocAddr, keyloc, sslConf.get("https.keystore.password", ""),
                sslConf.get("https.keystore.keypassword", ""));
    }
    this.infoServer.addServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getStorageID());

    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}

From source file:org.apache.hadoop.hdfs.server.datanode.DataNode.java

/**
 * This method starts the data node with the specified conf.
 *
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @throws IOException
 * @throws MalformedObjectNameException 
 * @throws MBeanRegistrationException 
 * @throws InstanceAlreadyExistsException 
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, SecureResources resources)
        throws IOException {
    if (UserGroupInformation.isSecurityEnabled() && resources == null)
        throw new RuntimeException("Cannot start secure cluster without " + "privileged resources.");

    this.secureResources = resources;
    // use configured nameserver & interface to get local hostname
    if (conf.get("slave.host.name") != null) {
        machineName = conf.get("slave.host.name");
    }
    if (machineName == null) {
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    InetSocketAddress nameNodeAddr = NameNode.getServiceAddress(conf, true);

    this.socketTimeout = conf.getInt("dfs.socket.timeout", HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt("dfs.write.packet.size", 64 * 1024);

    InetSocketAddress socAddr = DataNode.getStreamingAddr(conf);
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = (DatanodeProtocol) RPC.waitForProxy(DatanodeProtocol.class, DatanodeProtocol.versionID,
            nameNodeAddr, conf);
    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set("StorageId", dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // register datanode MXBean
    this.registerMXBean(conf); // register the MXBean for DataNode

    // Allow configuration to delay block reports to find bugs
    artificialBlockReceivedDelay = conf.getInt("dfs.datanode.artificialBlockReceivedDelay", 0);

    // find free port or use privileged port provide
    ServerSocket ss;
    if (secureResources == null) {
        ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
        Server.bind(ss, socAddr, 0);
    } else {
        ss = resources.getStreamingSocket();
    }
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;
    DataNode.nameNodeAddr = nameNodeAddr;

    //initialize periodic block scanner
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verifcation is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = DataNode.getInfoAddr(conf);
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = (secureResources == null)
            ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN))
            : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf,
                    SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN), secureResources.getListener());
    if (conf.getBoolean("dfs.https.enable", false)) {
        boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new Configuration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);

    this.infoServer.setAttribute("datanode", this);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);

    if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
        infoServer.addJerseyResourcePackage(
                DatanodeWebHdfsMethods.class.getPackage().getName() + ";" + Param.class.getPackage().getName(),
                WebHdfsFileSystem.PATH_PREFIX + "/*");
    }
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = DataNodeInstrumentation.create(conf, dnRegistration.getStorageID());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    // BlockTokenSecretManager is created here, but it shouldn't be
    // used until it is initialized in register().
    this.blockTokenSecretManager = new BlockTokenSecretManager(false, 0, 0);
    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf, blockTokenSecretManager);
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);
}

From source file:org.apache.isis.objectstore.nosql.db.file.server.FileServer.java

private void startService() {
    final String serviceHost = config.getString("fileserver.host", DEFAULT_HOST);
    final int servicePort = config.getInt("fileserver.port", DEFAULT_SERVICE_PORT);
    final int connectionTimeout = config.getInt("fileserver.connection.timeout", 5000);
    final int readTimeout = config.getInt("fileserver.read.timeout", 5000);

    ServerSocket socket = null;
    try {
        LOG.debug("setting up service socket on " + serviceHost + ":" + servicePort);
        final InetAddress address = InetAddress.getByName(serviceHost);
        socket = new ServerSocket(servicePort, BACKLOG, address);
        socket.setSoTimeout(connectionTimeout);
        LOG.info("file service listenting on " + socket.getInetAddress().getHostAddress() + " port "
                + socket.getLocalPort());
        LOG.debug("file service listenting on " + socket);
        final LogRange logFileRange = Util.logFileRange();
        if (!logFileRange.noLogFile()) {
            final long lastRecoveryFile = logFileRange.getLast();
            final File file = Util.logFile(lastRecoveryFile);
            LOG.info("replaying last recovery file: " + file.getAbsolutePath());
            recover(file);
        }
        server.startup();
    } catch (final UnknownHostException e) {
        LOG.error("Unknown host " + serviceHost, e);
        System.exit(0);
    } catch (final IOException e) {
        LOG.error("start failure - networking not set up for " + serviceHost, e);
        System.exit(0);
    } catch (final RuntimeException e) {
        LOG.error("start failure", e);
        System.exit(0);
    }
    do {
        try {
            while (isQuiescent) {
                try {
                    Thread.sleep(300);
                } catch (final InterruptedException ignore) {
                }
            }
            final Socket connection = socket.accept();
            LOG.debug("connection from " + connection);
            connection.setSoTimeout(readTimeout);
            serviceConnection(connection, readTimeout);
        } catch (final SocketTimeoutException expected) {
        } catch (final IOException e) {
            LOG.error("networking problem", e);
        }
    } while (awaitConnections);
}

From source file:org.apache.isis.objectstore.nosql.db.file.server.FileServer.java

private void startControl() {
    final String controlHost = config.getString("fileserver.control-host", DEFAULT_HOST);
    final int controlPort = config.getInt("fileserver.control-port", DEFAULT_CONTROL_PORT);
    final int connectionTimeout = config.getInt("fileserver.connection.timeout", 5000);

    ServerSocket socket = null;
    try {
        LOG.debug("setting up control socket on " + controlHost + ":" + controlPort);
        final InetAddress address = InetAddress.getByName(controlHost);
        socket = new ServerSocket(controlPort, 0, address);
        socket.setSoTimeout(connectionTimeout);
        LOG.info("file control listenting on " + socket.getInetAddress().getHostAddress() + " port "
                + socket.getLocalPort());
        LOG.debug("file control listenting on " + socket);
    } catch (final UnknownHostException e) {
        LOG.error("Unknown host " + controlHost, e);
        System.exit(0);
    } catch (final IOException e) {
        LOG.error("start failure - networking not set up for " + controlHost, e);
        System.exit(0);
    } catch (final RuntimeException e) {
        LOG.error("start failure", e);
        System.exit(0);
    }
    do {
        try {
            final Socket connection = socket.accept();
            LOG.info("control connection from " + connection);
            controlConnection(connection);
        } catch (final SocketTimeoutException expected) {
        } catch (final IOException e) {
            LOG.error("networking problem", e);
        }
    } while (awaitConnections);
}

From source file:org.apache.isis.objectstore.nosql.db.file.server.FileServer.java

private void startSecondary() {
    final String serviceHost = config.getString("fileserver.sync-host", DEFAULT_HOST);
    final int servicePort = config.getInt("fileserver.sync-port", DEFAULT_SYNC_PORT);

    Util.ensureDirectoryExists();
    ServerSocket socket = null;
    try {
        LOG.debug("setting up syncing socket on " + serviceHost + ":" + servicePort);
        final InetAddress address = InetAddress.getByName(serviceHost);
        socket = new ServerSocket(servicePort, 0, address);
        LOG.info(
                "listenting on " + socket.getInetAddress().getHostAddress() + " port " + socket.getLocalPort());
        LOG.debug("listenting on " + socket);
        do {
            syncConnection(socket.accept(), 0);
        } while (awaitConnections);
    } catch (final UnknownHostException e) {
        LOG.error("Unknown host " + serviceHost, e);
        System.exit(0);
    } catch (final IOException e) {
        LOG.error("start failure - networking not set up for " + serviceHost, e);
        System.exit(0);
    } catch (final RuntimeException e) {
        LOG.error("start failure", e);
        System.exit(0);
    }
}
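
All three FileServer methods above follow the same shape: bind a ServerSocket to a configured host, log the bound endpoint via getInetAddress().getHostAddress() and getLocalPort(), then accept connections in a loop, with SO_TIMEOUT (where set) letting the loop periodically re-check its shutdown flag. A condensed sketch of that pattern (class name, field, and timeout value are illustrative):

import java.net.InetAddress;
import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketTimeoutException;

public class AcceptLoopSketch {
    private volatile boolean awaitConnections = true;

    void serve(String host, int port) throws Exception {
        ServerSocket socket = new ServerSocket(port, 0, InetAddress.getByName(host));
        socket.setSoTimeout(5000); // wake up periodically so the loop can observe awaitConnections
        System.out.println("listening on " + socket.getInetAddress().getHostAddress()
                + " port " + socket.getLocalPort());
        do {
            try (Socket connection = socket.accept()) {
                // hand the connection off to a handler here
            } catch (SocketTimeoutException expected) {
                // no connection within the timeout; loop and check the flag again
            }
        } while (awaitConnections);
        socket.close();
    }
}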