Example usage for java.net ServerSocket ServerSocket

List of usage examples for java.net ServerSocket ServerSocket

Introduction

On this page you can find example usage of the java.net ServerSocket no-argument constructor, ServerSocket().

Prototype

public ServerSocket() throws IOException 

Source Link

Document

Creates an unbound server socket.

Usage

From source file:common.DataNode.java

/**
 * This method starts the data node with the specified conf: it resolves the
 * local hostname, sets up storage (real or simulated), performs the
 * handshake with the name node, opens the streaming data port, and starts
 * the block scanner, HTTP info server, IPC server, and any plugins.
 *
 * @param conf - the configuration
 *  if conf's CONFIG_PROPERTY_SIMULATED property is set
 *  then a simulated storage based data node is created.
 * 
 * @param dataDirs - only for a non-simulated storage data node
 * @param namenode RPC handle to the name node this data node registers with
 * @throws IOException
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs, DatanodeProtocol namenode)
        throws IOException {
    // use configured nameserver & interface to get local hostname
    if (conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY) != null) {
        machineName = conf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY);
    }
    if (machineName == null) {
        // no explicit hostname configured: fall back to DNS resolution via
        // the configured interface/nameserver
        machineName = DNS.getDefaultHost(conf.get("dfs.datanode.dns.interface", "default"),
                conf.get("dfs.datanode.dns.nameserver", "default"));
    }
    this.nameNodeAddr = NameNode.getAddress(conf);

    this.socketTimeout = conf.getInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, HdfsConstants.READ_TIMEOUT);
    this.socketWriteTimeout = conf.getInt("dfs.datanode.socket.write.timeout", HdfsConstants.WRITE_TIMEOUT);
    /* Based on results on different platforms, we might need set the default 
     * to false on some of them. */
    this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed", true);
    this.writePacketSize = conf.getInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
            DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
    InetSocketAddress socAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.address", "0.0.0.0:50010"));
    int tmpPort = socAddr.getPort();
    storage = new DataStorage();
    // construct registration
    this.dnRegistration = new DatanodeRegistration(machineName + ":" + tmpPort);

    // connect to name node
    this.namenode = namenode;

    // get version and id info from the name-node
    NamespaceInfo nsInfo = handshake();
    StartupOption startOpt = getStartupOption(conf);
    assert startOpt != null : "Startup option must be set.";

    boolean simulatedFSDataset = conf.getBoolean("dfs.datanode.simulateddatastorage", false);
    if (simulatedFSDataset) {
        // simulated storage: fabricate a storage id and load the simulated
        // dataset implementation reflectively (it lives in the test tree)
        setNewStorageID(dnRegistration);
        dnRegistration.storageInfo.layoutVersion = FSConstants.LAYOUT_VERSION;
        dnRegistration.storageInfo.namespaceID = nsInfo.namespaceID;
        // it would have been better to pass storage as a parameter to
        // constructor below - need to augment ReflectionUtils used below.
        conf.set(DFSConfigKeys.DFS_DATANODE_STORAGEID_KEY, dnRegistration.getStorageID());
        try {
            //Equivalent of following (can't do because Simulated is in test dir)
            //  this.data = new SimulatedFSDataset(conf);
            this.data = (FSDatasetInterface) ReflectionUtils.newInstance(
                    Class.forName("org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset"), conf);
        } catch (ClassNotFoundException e) {
            throw new IOException(StringUtils.stringifyException(e));
        }
    } else { // real storage
        // read storage info, lock data dirs and transition fs state if necessary
        storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
        // adjust
        this.dnRegistration.setStorageInfo(storage);
        // initialize data node internal structure
        this.data = new FSDataset(storage, conf);
    }

    // find free port; a positive write timeout requires an NIO channel-backed
    // socket, otherwise a plain unbound ServerSocket is used
    ServerSocket ss = (socketWriteTimeout > 0) ? ServerSocketChannel.open().socket() : new ServerSocket();
    Server.bind(ss, socAddr, 0);
    ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
    // adjust machine name with the actual port
    tmpPort = ss.getLocalPort();
    selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(), tmpPort);
    this.dnRegistration.setName(machineName + ":" + tmpPort);
    LOG.info("Opened info server at " + tmpPort);

    this.threadGroup = new ThreadGroup("dataXceiverServer");
    this.dataXceiverServer = new Daemon(threadGroup, new DataXceiverServer(ss, conf, this));
    this.threadGroup.setDaemon(true); // auto destroy when empty

    this.blockReportInterval = conf.getLong("dfs.blockreport.intervalMsec", BLOCKREPORT_INTERVAL);
    this.initialBlockReportDelay = conf.getLong("dfs.blockreport.initialDelay", BLOCKREPORT_INITIAL_DELAY)
            * 1000L;
    if (this.initialBlockReportDelay >= blockReportInterval) {
        // a delay at least as long as the interval makes no sense; disable it
        this.initialBlockReportDelay = 0;
        LOG.info("dfs.blockreport.initialDelay is greater than " + "dfs.blockreport.intervalMsec."
                + " Setting initial delay to 0 msec:");
    }
    this.heartBeatInterval = conf.getLong("dfs.heartbeat.interval", HEARTBEAT_INTERVAL) * 1000L;

    //initialize periodic block scanner (only when enabled and backed by FSDataset)
    String reason = null;
    if (conf.getInt("dfs.datanode.scan.period.hours", 0) < 0) {
        reason = "verification is turned off by configuration";
    } else if (!(data instanceof FSDataset)) {
        reason = "verifcation is supported only with FSDataset";
    }
    if (reason == null) {
        blockScanner = new DataBlockScanner(this, (FSDataset) data, conf);
    } else {
        LOG.info("Periodic Block Verification is disabled because " + reason + ".");
    }

    //create a servlet to serve full-file content
    InetSocketAddress infoSocAddr = NetUtils
            .createSocketAddr(conf.get("dfs.datanode.http.address", "0.0.0.0:50075"));
    String infoHost = infoSocAddr.getHostName();
    int tmpInfoPort = infoSocAddr.getPort();
    this.infoServer = new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, conf);
    if (conf.getBoolean("dfs.https.enable", false)) {
        // optional HTTPS listener configured from the ssl-server resource
        boolean needClientAuth = conf.getBoolean(DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
        InetSocketAddress secInfoSocAddr = NetUtils
                .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 0));
        Configuration sslConf = new HdfsConfiguration(false);
        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
        this.infoServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
    }
    this.infoServer.addInternalServlet(null, "/streamFile/*", StreamFile.class);
    this.infoServer.addInternalServlet(null, "/getFileChecksum/*", FileChecksumServlets.GetServlet.class);
    this.infoServer.setAttribute("datanode.blockScanner", blockScanner);
    this.infoServer.setAttribute("datanode.conf", conf);
    this.infoServer.addServlet(null, "/blockScannerReport", DataBlockScanner.Servlet.class);
    this.infoServer.start();
    // adjust info port
    this.dnRegistration.setInfoPort(this.infoServer.getPort());
    myMetrics = new DataNodeMetrics(conf, dnRegistration.getName());

    // set service-level authorization security policy
    if (conf.getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
        ServiceAuthorizationManager.refresh(conf, new HDFSPolicyProvider());
    }

    //init ipc server
    InetSocketAddress ipcAddr = NetUtils.createSocketAddr(conf.get("dfs.datanode.ipc.address"));
    ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(), ipcAddr.getPort(),
            conf.getInt("dfs.datanode.handler.count", 3), false, conf);
    ipcServer.start();
    dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());

    LOG.info("dnRegistration = " + dnRegistration);

    // plugin failures are logged but never abort data node startup
    plugins = conf.getInstances("dfs.datanode.plugins", ServicePlugin.class);
    for (ServicePlugin p : plugins) {
        try {
            p.start(this);
            LOG.info("Started plug-in " + p);
        } catch (Throwable t) {
            LOG.warn("ServicePlugin " + p + " could not be started", t);
        }
    }
}

From source file:com.addthis.hydra.task.output.tree.TreeMapper.java

/**
 * Initializes the tree mapper: applies optional tree storage tuning
 * parameters, resolves the local host address, opens the tree database,
 * and optionally starts the embedded HTTP server, JMX remoting, and the
 * live query engine. Finally runs the optional pre-chain bundle.
 *
 * @param runConfig task runtime configuration (job id, data directory, etc.)
 * @throws Exception if any subsystem fails to initialize
 */
private void _init(TaskRunConfig runConfig) throws Exception {
    config = runConfig;
    mapstats = new TreeMapperStats(log);

    resolve();

    // Apply optional overrides for the tree cache/trash/page defaults.
    if (nodeCache != null)
        TreeCommonParameters.setDefaultCleanQueueSize(nodeCache);
    if (trashInterval != null)
        TreeCommonParameters.setDefaultTrashInterval(trashInterval);
    if (trashTimeLimit != null)
        TreeCommonParameters.setDefaultTrashTimeLimit(trashTimeLimit);
    if (storage != null) {
        if (storage.maxCacheSize != null)
            TreeCommonParameters.setDefaultMaxCacheSize(storage.maxCacheSize);
        if (storage.maxCacheMem != null)
            TreeCommonParameters.setDefaultMaxCacheMem(storage.maxCacheMem);
        if (storage.maxPageSize != null)
            // Fixed: previously passed storage.maxCacheSize here (copy/paste
            // error), silently ignoring the configured page size.
            TreeCommonParameters.setDefaultMaxPageSize(storage.maxPageSize);
        if (storage.maxPageMem != null)
            TreeCommonParameters.setDefaultMaxPageMem(storage.maxPageMem);
        if (storage.memSample != null)
            TreeCommonParameters.setDefaultMemSample(storage.memSample);
    }

    if (Strings.isEmpty(localhost)) {
        localhost = InetAddress.getLocalHost().getHostAddress();
    }
    log.info("[init] java=" + System.getProperty("java.vm.version") + " query=" + enableQuery + " http="
            + enableHttp + " jmx=" + enableJmx + " live=" + live);
    log.info("[init] host=" + localhost + " port=" + port + " target=" + root + " job=" + config.jobId);

    Path treePath = Paths.get(runConfig.dir, "data");
    tree = new ConcurrentTree(Files.initDirectory(treePath.toFile()));
    bench = new Bench(EnumSet.allOf(BENCH.class), 1000);

    if (enableHttp) {
        // Start embedded jetty and record the actual port for the job.
        jetty = new Server(port > 0 ? port++ : 0);
        jetty.start();
        int httpPort = jetty.getConnectors()[0].getLocalPort();
        log.info("[init.http] http://" + localhost + ":" + httpPort + "/");
        Files.write(new File("job.port"), Bytes.toBytes(Integer.toString(httpPort)), false);
    }

    if (enableJmx) {
        int queryPort = 0;
        jmxname = new ObjectName("com.addthis.hydra:type=Hydra,node=" + queryPort);
        ManagementFactory.getPlatformMBeanServer().registerMBean(mapstats, jmxname);
        // Probe for a usable port by binding a throwaway server socket;
        // try-with-resources guarantees it is released even on failure.
        int jmxport;
        try (ServerSocket ss = new ServerSocket()) {
            ss.setReuseAddress(true);
            ss.bind(port > 0 ? new InetSocketAddress(port++) : null);
            jmxport = ss.getLocalPort();
        }
        if (jmxport == -1) {
            log.warn("[init.jmx] failed to get a port");
        } else {
            try {
                jmxremote = new MBeanRemotingSupport(jmxport);
                jmxremote.start();
                log.info("[init.jmx] port=" + jmxport);
            } catch (Exception e) {
                log.warn("[init.jmx] err=" + e);
            }
        }
    }

    if (config.jobId != null && live && livePort > -1) {
        QueryEngine liveQueryEngine = new QueryEngine(tree);
        connectToMesh(treePath.toFile(), runConfig.jobId, liveQueryEngine);
    }

    startTime = System.currentTimeMillis();

    if (pre != null) {
        log.warn("pre-chain: " + pre);
        processBundle(new KVBundle(), pre);
    }
}

From source file:org.springframework.boot.actuate.autoconfigure.EndpointWebMvcAutoConfigurationTests.java

/**
 * Verifies that refreshing the context fails with a {@link WebServerException}
 * when the configured management port is already bound by another socket.
 * The try-with-resources guarantees the blocking socket is released even if
 * bind() itself throws (the previous try/finally only covered the code after
 * a successful bind, leaking the socket on a bind failure).
 */
@Test
public void specificPortsViaPropertiesWithClash() throws Exception {
    int managementPort = ports.get().management;
    try (ServerSocket serverSocket = new ServerSocket()) {
        // Occupy the management port so the refresh must clash with it.
        serverSocket.bind(new InetSocketAddress(managementPort));
        EnvironmentTestUtils.addEnvironment(this.applicationContext, "server.port:" + ports.get().server,
                "management.port:" + ports.get().management);
        this.applicationContext.register(RootConfig.class, EndpointConfig.class, BaseConfiguration.class,
                EndpointWebMvcAutoConfiguration.class, ErrorMvcAutoConfiguration.class);
        this.thrown.expect(WebServerException.class);
        this.applicationContext.refresh();
    }
}

From source file:de.uniluebeck.itm.spyglass.gui.wizard.WisebedPacketReaderConfigurationWizard.java

/**
 * Checks whether a server socket can be bound to the given host and port.
 *
 * @param host the local host name or address to bind to
 * @param port the local port to bind to
 * @return {@code true} if the bind succeeded, {@code false} on any I/O error
 */
private boolean checkIfServerSocketCanBeOpened(String host, int port) {
    // try-with-resources guarantees the socket is closed even when bind()
    // throws; the previous version only closed it on the success path,
    // leaking a descriptor for every failed probe.
    try (ServerSocket socket = new ServerSocket()) {
        socket.bind(new InetSocketAddress(host, port));
        return true;
    } catch (IOException e) {
        return false;
    }
}

From source file:test.java.com.spotify.docker.client.DefaultDockerClientTest.java

/**
 * Verifies that a read against an endpoint that accepts connections but
 * never answers fails with a {@link DockerTimeoutException}.
 */
@Test(expected = DockerTimeoutException.class)
public void testReadTimeout() throws Exception {
    try (final ServerSocket serverSocket = new ServerSocket()) {
        // Bind and listen but never accept, so the client's read must time out.
        serverSocket.bind(new InetSocketAddress("127.0.0.1", 0));
        final int localPort = serverSocket.getLocalPort();
        awaitConnectable(serverSocket.getInetAddress(), localPort);
        final DockerClient connectTimeoutClient = DefaultDockerClient.builder()
                .uri("http://127.0.0.1:" + localPort)
                .connectTimeoutMillis(NO_TIMEOUT)
                .readTimeoutMillis(100)
                .build();
        connectTimeoutClient.version();
    }
}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

/**
 * Starts the background server used for large checkpoint transfers.
 * Tries to bind an ephemeral port on the node's configured bind address
 * first; on failure, falls back to the wildcard address. A worker thread
 * pool (named after this class and node id) serves the transfers.
 *
 * @return {@code true} if the server socket was opened and the listener
 *         task submitted; {@code false} if no socket could be opened
 */
private boolean initCheckpointServer() {
    this.executor = Executors.newScheduledThreadPool(THREAD_POOL_SIZE, new ThreadFactory() {
        @Override
        public Thread newThread(Runnable r) {
            Thread thread = Executors.defaultThreadFactory().newThread(r);
            thread.setName(SQLReconfiguratorDB.class.getSimpleName() + myID);
            return thread;
        }
    });

    try {
        InetAddress addr = null;
        try {
            this.serverSock = new ServerSocket();
            // First try the configured bind address with an ephemeral port.
            this.serverSock
                    .bind(new InetSocketAddress(addr = this.consistentNodeConfig.getBindAddress(myID), 0));
        } catch (IOException ioe) {
            log.info(this + " unable to open large checkpoint server socket on " + addr
                    + "; trying wildcard address instead");
            // Close the failed socket before opening the fallback, otherwise
            // its file descriptor would leak.
            if (this.serverSock != null) {
                try {
                    this.serverSock.close();
                } catch (IOException ignored) {
                    // best-effort cleanup of a socket that never bound
                }
            }
            this.serverSock = new ServerSocket(0);
        }
        checkpointServerFuture = executor.submit(new CheckpointServer());
        return true;
    } catch (IOException e) {
        log.severe(this + " unable to open server socket for large checkpoint transfers");
        e.printStackTrace();
    }
    return false;
}

From source file:org.springframework.boot.web.servlet.server.AbstractServletWebServerFactoryTests.java

/**
 * Finds a free TCP port, occupies it with a bound server socket, runs the
 * given action against that port, and releases the socket afterwards.
 *
 * @param action the action to run while the port is blocked
 * @throws IOException if closing the blocking socket fails
 */
protected final void doWithBlockedPort(BlockedPortAction action) throws IOException {
    int port = SocketUtils.findAvailableTcpPort(40000);
    ServerSocket serverSocket = new ServerSocket();
    // Retry a few times in case the port is briefly contended between
    // findAvailableTcpPort() and bind().
    for (int i = 0; i < 10; i++) {
        try {
            serverSocket.bind(new InetSocketAddress(port));
            break;
        } catch (Exception ignored) {
            // bind failed; deliberately swallowed so we can retry
        }
    }
    try {
        action.run(port);
    } finally {
        serverSocket.close();
    }
}