Example usage for java.net InetSocketAddress getHostName

List of usage examples for java.net InetSocketAddress getHostName

Introduction

In this page you can find the example usage for java.net InetSocketAddress getHostName.

Prototype

public final String getHostName() 

Source Link

Document

Gets the hostname, performing a reverse DNS lookup if the address was created from a literal IP.

Usage

From source file:org.apache.accumulo.minicluster.MiniAccumuloCluster.java

/**
 * Builds the on-disk layout and configuration files for a local mini Accumulo
 * cluster, optionally starting an embedded {@code MiniDFSCluster} when
 * {@code config.useMiniDFS()} is set.
 *
 * @param config
 *          initial configuration
 * @throws IOException if directories or configuration files cannot be created
 */
public MiniAccumuloCluster(MiniAccumuloConfig config) throws IOException {

    this.config = config.initialize();

    // Create the whole directory tree up front so later file writes cannot fail
    // on a missing parent directory.
    config.getConfDir().mkdirs();
    config.getAccumuloDir().mkdirs();
    config.getZooKeeperDir().mkdirs();
    config.getLogDir().mkdirs();
    config.getWalogDir().mkdirs();
    config.getLibDir().mkdirs();

    if (config.useMiniDFS()) {
        // Dedicated namenode, datanode and working directories for the mini DFS.
        File nn = new File(config.getAccumuloDir(), "nn");
        nn.mkdirs();
        File dn = new File(config.getAccumuloDir(), "dn");
        dn.mkdirs();
        File dfs = new File(config.getAccumuloDir(), "dfs");
        dfs.mkdirs();
        Configuration conf = new Configuration();
        conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dn.getAbsolutePath());
        conf.set(DFSConfigKeys.DFS_REPLICATION_KEY, "1");
        conf.set("dfs.support.append", "true");
        conf.set("dfs.datanode.synconclose", "true");
        conf.set("dfs.datanode.data.dir.perm", MiniDFSUtil.computeDatanodeDirectoryPermission());
        // MiniDFSCluster keys its storage root off the "test.build.data" system
        // property; point it at our dfs dir for the constructor call, then restore
        // the previous value (or clear it) so other code is not affected.
        String oldTestBuildData = System.setProperty("test.build.data", dfs.getAbsolutePath());
        miniDFS = new MiniDFSCluster(conf, 1, true, null);
        if (oldTestBuildData == null)
            System.clearProperty("test.build.data");
        else
            System.setProperty("test.build.data", oldTestBuildData);
        miniDFS.waitClusterUp();
        // Record the live namenode address; getHostName() may do a reverse lookup.
        InetSocketAddress dfsAddress = miniDFS.getNameNode().getNameNodeAddress();
        dfsUri = "hdfs://" + dfsAddress.getHostName() + ":" + dfsAddress.getPort();
        File coreFile = new File(config.getConfDir(), "core-site.xml");
        writeConfig(coreFile, Collections.singletonMap("fs.default.name", dfsUri).entrySet());
        File hdfsFile = new File(config.getConfDir(), "hdfs-site.xml");
        writeConfig(hdfsFile, conf);

        // Point Accumulo's site config at the freshly started mini DFS.
        Map<String, String> siteConfig = config.getSiteConfig();
        siteConfig.put(Property.INSTANCE_DFS_URI.getKey(), dfsUri);
        siteConfig.put(Property.INSTANCE_DFS_DIR.getKey(), "/accumulo");
        config.setSiteConfig(siteConfig);
    } else {
        dfsUri = "file://";
    }

    File siteFile = new File(config.getConfDir(), "accumulo-site.xml");
    writeConfig(siteFile, config.getSiteConfig().entrySet());

    // NOTE(review): the FileWriter below immediately rewrites the same siteFile
    // that writeConfig() just produced — one of the two writes looks redundant;
    // confirm which is authoritative before removing either.
    FileWriter fileWriter = new FileWriter(siteFile);
    fileWriter.append("<configuration>\n");

    for (Entry<String, String> entry : config.getSiteConfig().entrySet())
        fileWriter.append("<property><name>" + entry.getKey() + "</name><value>" + entry.getValue()
                + "</value></property>\n");
    fileWriter.append("</configuration>\n");
    fileWriter.close();

    zooCfgFile = new File(config.getConfDir(), "zoo.cfg");
    fileWriter = new FileWriter(zooCfgFile);

    // zookeeper uses Properties to read its config, so use that to write in order to properly escape things like Windows paths
    Properties zooCfg = new Properties();
    zooCfg.setProperty("tickTime", "2000");
    zooCfg.setProperty("initLimit", "10");
    zooCfg.setProperty("syncLimit", "5");
    zooCfg.setProperty("clientPort", config.getZooKeeperPort() + "");
    zooCfg.setProperty("maxClientCnxns", "1000");
    zooCfg.setProperty("dataDir", config.getZooKeeperDir().getAbsolutePath());
    zooCfg.store(fileWriter, null);

    fileWriter.close();

    // Copy any prebuilt native map libraries (libNativeMap*) from the source tree
    // into the cluster's lib/native/map directory, if the source tree is present.
    File nativeMap = new File(config.getLibDir().getAbsolutePath() + "/native/map");
    nativeMap.mkdirs();
    File testRoot = new File(
            new File(new File(System.getProperty("user.dir")).getParent() + "/server/src/main/c++/nativeMap")
                    .getAbsolutePath());

    if (testRoot.exists()) {
        for (String file : testRoot.list()) {
            File src = new File(testRoot, file);
            if (src.isFile() && file.startsWith("libNativeMap"))
                FileUtils.copyFile(src, new File(nativeMap, file));
        }
    }
}

From source file:com.xebialabs.overthere.telnet.TelnetConnection.java

/**
 * Creates a Telnet connection whose target host and port have been resolved
 * through the supplied {@link AddressPortMapper}. Telnet is only supported
 * against Windows hosts, which the trailing checks enforce.
 */
public TelnetConnection(ConnectionOptions options, AddressPortMapper mapper, OverthereFile workingDirectory) {
    String rawHost = options.get(ADDRESS);
    int rawPort = options.get(PORT, connectionType.getDefaultPort(options));
    InetSocketAddress mapped = mapper.map(createUnresolved(rawHost, rawPort));

    // Use the mapped endpoint, not the raw option values, as the actual target.
    this.address = mapped.getHostName();
    this.port = mapped.getPort();

    this.os = options.getEnum(OPERATING_SYSTEM, OperatingSystemFamily.class);
    this.connectionTimeoutMillis = options.getInteger(CONNECTION_TIMEOUT_MILLIS,
            CONNECTION_TIMEOUT_MILLIS_DEFAULT);
    this.socketTimeoutMillis = options.getInteger(SOCKET_TIMEOUT_MILLIS, SOCKET_TIMEOUT_MILLIS_DEFAULT);
    this.username = options.get(USERNAME);
    this.password = options.get(PASSWORD);
    this.protocol = options.get(PROTOCOL);
    this.mapper = mapper;
    this.workingDirectory = workingDirectory;

    checkIsWindowsHost(os, protocol, connectionType);
    checkNotNewStyleWindowsDomain(username, protocol, connectionType);
}

From source file:org.cloudata.core.commitlog.ServerLocationManager.java

/**
 * Persists the commit log server list for a tablet as "host:port" lines under
 * the log image path. An existing, identical image is left untouched; a stale
 * one is deleted and rewritten.
 *
 * @param tabletName name of the tablet whose commit log locations are stored
 * @param addrList   commit log server addresses to record
 * @throws IOException if the file system cannot be read or written
 */
private void storeToImage(String tabletName, InetSocketAddress[] addrList) throws IOException {
    GPath commitLogMetaPath = new GPath(logImagePath + "/" + tabletName);

    if (fs.exists(commitLogMetaPath)) {
        if (isSameCommitLogInfo(commitLogMetaPath, addrList)) {
            LOG.info("Ignore deleting commit log meta path: " + (logImagePath + "/" + tabletName));
            return;
        } else {
            LOG.info("Deleting commit log meta path: " + (logImagePath + "/" + tabletName) + " and remake");
            fs.delete(commitLogMetaPath, true);
        }
    }

    // Write one "host:port" line per commit log server.
    // FIX: close the stream in a finally block so a failed write cannot leak it.
    OutputStream out = fs.create(commitLogMetaPath);
    try {
        for (InetSocketAddress eachServer : addrList) {
            // getHostName() may trigger a reverse DNS lookup for literal-IP addresses.
            out.write((eachServer.getHostName() + ":" + eachServer.getPort() + "\n").getBytes());
        }
    } finally {
        out.close();
    }
}

From source file:org.cloudata.core.commitlog.CommitLogClient.java

/**
 * Renders the pipe address list as a "host:port, host:port, " string.
 * Note: every entry — including the last — is followed by ", ", matching the
 * format existing callers expect.
 *
 * @return the concatenated address list, or an empty string for an empty list
 */
private String getPipeAddressListStr() {
    // StringBuilder avoids the O(n^2) cost of repeated String concatenation.
    StringBuilder msg = new StringBuilder();
    for (InetSocketAddress addr : pipeAddressList) {
        msg.append(addr.getHostName()).append(":").append(addr.getPort()).append(", ");
    }
    return msg.toString();
}

From source file:org.apache.synapse.transport.passthru.core.PassThroughListeningIOReactorManager.java

/**
 * Close specific endpoints started by PTT Listeners using give set of bind addresses.
 *
 * @param port          Port of the listener
 * @param bindAddresses bind address list of endpoints to be closed
 * @return true if successfully closed, false if any error
 *///w  w w  .  ja  v  a2 s  .  co m
/**
 * Close specific endpoints started by PTT Listeners using the given set of bind addresses.
 *
 * @param port          Port of the listener
 * @param bindAddresses bind address list of endpoints to be closed
 * @return true if successfully closed, false if any error
 */
public boolean closeSpecificPTTListenerEndpoints(int port, Set<InetSocketAddress> bindAddresses) {
    try {
        ListeningIOReactor listeningIOReactor = passThroughListenerIOReactorMapper.get(port);
        if (listeningIOReactor != null) {
            Set<ListenerEndpoint> endpoints = listeningIOReactor.getEndpoints();
            // If it is shared IO Reactor then only close endpoints related to PTT Listener
            if (passThroughListenerServerIODispatchMapper.get(port) instanceof MultiListenerServerIODispatch) {
                for (ListenerEndpoint listenerEndpoint : endpoints) {
                    if (listenerEndpoint.getAddress() instanceof InetSocketAddress) {
                        int endPointPort = ((InetSocketAddress) listenerEndpoint.getAddress()).getPort();
                        // Endpoints owned by dynamically added listeners are left alone.
                        if (dynamicPTTListeningEndpointMapper.containsKey(endPointPort)) {
                            continue;
                        }
                        closeEndpointIfHostMatches(listenerEndpoint, bindAddresses);
                    }
                }
            } else {
                // Dedicated reactor: every endpoint belongs to the PTT listener.
                for (ListenerEndpoint listenerEndpoint : endpoints) {
                    closeEndpointIfHostMatches(listenerEndpoint, bindAddresses);
                }
            }
        }
        return true;
    } catch (Exception e) {
        log.error("Error occurred when closing Endpoint in PassThrough Transport Related to port " + port, e);
        return false;
    }
}

/**
 * Closes the endpoint when its bound host name matches (case-insensitively) any
 * of the given bind addresses. Extracted to remove the loop duplicated in both
 * branches above. Note: getHostName() may perform a reverse DNS lookup.
 */
private static void closeEndpointIfHostMatches(ListenerEndpoint listenerEndpoint,
        Set<InetSocketAddress> bindAddresses) {
    for (InetSocketAddress inetSocketAddress : bindAddresses) {
        if (inetSocketAddress.getHostName().equalsIgnoreCase(
                ((InetSocketAddress) listenerEndpoint.getAddress()).getHostName())) {
            listenerEndpoint.close();
        }
    }
}

From source file:org.pentaho.di.ui.vfs.hadoopvfsfilechooserdialog.HadoopVfsFileChooserDialog.java

/**
 * Builds the "Connection" group of the Hadoop VFS file chooser: URL, port,
 * user and password fields plus a Connect button whose handler probes the
 * configured namenode(s) with a raw socket before handing the URL to VFS.
 */
private void createConnectionPanel() {
    // The Connection group
    Group connectionGroup = new Group(this, SWT.SHADOW_ETCHED_IN);
    connectionGroup.setText(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.ConnectionGroup.Label")); //$NON-NLS-1$;
    GridLayout connectionGroupLayout = new GridLayout();
    connectionGroupLayout.marginWidth = 5;
    connectionGroupLayout.marginHeight = 5;
    connectionGroupLayout.verticalSpacing = 5;
    connectionGroupLayout.horizontalSpacing = 5;
    GridData gData = new GridData(SWT.FILL, SWT.FILL, true, false);
    connectionGroup.setLayoutData(gData);
    connectionGroup.setLayout(connectionGroupLayout);

    // The composite we need in the group
    Composite textFieldPanel = new Composite(connectionGroup, SWT.NONE);
    GridData gridData = new GridData(SWT.FILL, SWT.FILL, true, false);
    textFieldPanel.setLayoutData(gridData);
    textFieldPanel.setLayout(new GridLayout(5, false));

    // URL label and text field; last-used host is restored from Props.
    wlUrl = new Label(textFieldPanel, SWT.RIGHT);
    wlUrl.setText(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.URL.Label")); //$NON-NLS-1$
    fdlUrl = new GridData();
    fdlUrl.widthHint = 75;
    wlUrl.setLayoutData(fdlUrl);
    wUrl = new Text(textFieldPanel, SWT.SINGLE | SWT.LEFT | SWT.BORDER);
    fdUrl = new GridData();
    fdUrl.widthHint = 150;
    wUrl.setLayoutData(fdUrl);
    wUrl.setText(Props.getInstance().getCustomParameter("HadoopVfsFileChooserDialog.host", "localhost"));
    wUrl.addModifyListener(new ModifyListener() {
        public void modifyText(ModifyEvent arg0) {
            handleConnectionButton();
        }
    });

    // UserID label and field
    wlUserID = new Label(textFieldPanel, SWT.RIGHT);
    wlUserID.setText(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.UserID.Label")); //$NON-NLS-1$
    fdlUserID = new GridData();
    fdlUserID.widthHint = 75;
    wlUserID.setLayoutData(fdlUserID);

    wUserID = new Text(textFieldPanel, SWT.SINGLE | SWT.LEFT | SWT.BORDER);
    fdUserID = new GridData();
    fdUserID.widthHint = 150;
    wUserID.setLayoutData(fdUserID);
    wUserID.setText(Props.getInstance().getCustomParameter("HadoopVfsFileChooserDialog.user", ""));

    // Place holder
    wPlaceHolderLabel = new Label(textFieldPanel, SWT.RIGHT);
    wPlaceHolderLabel.setText("");
    fdlPlaceHolderLabel = new GridData();
    fdlPlaceHolderLabel.widthHint = 75;
    // NOTE(review): this looks like a copy-paste slip — the placeholder's layout
    // data is applied to wlUserID instead of wPlaceHolderLabel. Confirm intent.
    wlUserID.setLayoutData(fdlPlaceHolderLabel);

    // Port label and text field
    wlPort = new Label(textFieldPanel, SWT.RIGHT);
    wlPort.setText(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.Port.Label")); //$NON-NLS-1$
    fdlPort = new GridData();
    fdlPort.widthHint = 75;
    wlPort.setLayoutData(fdlPort);

    wPort = new Text(textFieldPanel, SWT.SINGLE | SWT.LEFT | SWT.BORDER);
    fdPort = new GridData();
    fdPort.widthHint = 150;
    wPort.setLayoutData(fdPort);
    wPort.setText(Props.getInstance().getCustomParameter("HadoopVfsFileChooserDialog.port", "9000"));
    wPort.addModifyListener(new ModifyListener() {
        public void modifyText(ModifyEvent arg0) {
            handleConnectionButton();
        }
    });

    // password label and field
    wlPassword = new Label(textFieldPanel, SWT.RIGHT);
    wlPassword.setText(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.Password.Label")); //$NON-NLS-1$
    fdlPassword = new GridData();
    fdlPassword.widthHint = 75;
    wlPassword.setLayoutData(fdlPassword);

    wPassword = new Text(textFieldPanel, SWT.SINGLE | SWT.LEFT | SWT.BORDER);
    wPassword.setEchoChar('*');
    fdPassword = new GridData();
    fdPassword.widthHint = 150;
    wPassword.setLayoutData(fdPassword);
    wPassword.setText(Props.getInstance().getCustomParameter("HadoopVfsFileChooserDialog.password", ""));

    // Connection button
    wConnectionButton = new Button(textFieldPanel, SWT.CENTER);
    fdConnectionButton = new GridData();
    fdConnectionButton.widthHint = 75;
    wConnectionButton.setLayoutData(fdConnectionButton);

    wConnectionButton.setText(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.ConnectionButton.Label"));
    wConnectionButton.addSelectionListener(new SelectionAdapter() {
        public void widgetSelected(SelectionEvent e) {

            // Store the successful connection info to hand off to VFS
            connectedHostname = wUrl.getText();
            connectedPortString = wPort.getText();

            try {

                // Create list of addresses to try. In non-HA environments, this will likely only have
                // one entry.
                ArrayList<InetSocketAddress> addressList = new ArrayList<InetSocketAddress>();

                // Before creating a socket, see if there is some name resolution we need to do
                // For example, in High Availability clusters, we might need to resolve the cluster name
                // (with no port) to a list of host:port pairs to try in sequence.
                // NOTE: If we could set the HDFS retry limit for the Test capability, we wouldn't need this
                // code. It's been fixed in later versions of Hadoop, but we can't be sure which version we're
                // using, or if a particular distribution has incorporated the fix.
                HadoopConfiguration hadoopConfig = getHadoopConfig();
                if (hadoopConfig != null) {
                    HadoopShim shim = hadoopConfig.getHadoopShim();
                    Configuration conf = shim.createConfiguration();
                    String haNameNodes = conf.get(HDFS_HA_CLUSTER_NAMENODES_PROP);
                    if (!Const.isEmpty(haNameNodes)) {

                        // HA case: each configured namenode resolves to its own host:port.
                        String[] haNameNode = haNameNodes.split(NAMENODE_LIST_DELIMITER);
                        if (!Const.isEmpty(haNameNode)) {
                            for (String nameNode : haNameNode) {
                                String nameNodeResolveProperty = HDFS_HA_CLUSTER_NAMENODE_RESOLVE_PREFIX
                                        + nameNode;
                                String nameNodeHostAndPort = conf.get(nameNodeResolveProperty);
                                if (!Const.isEmpty(nameNodeHostAndPort)) {
                                    String[] nameNodeParams = nameNodeHostAndPort
                                            .split(NAMENODE_HOSTNAME_PORT_DELIMITER);
                                    String hostname = nameNodeParams[0];
                                    int port = 0;
                                    if (nameNodeParams.length > 1) {
                                        try {
                                            port = Integer.parseInt(nameNodeParams[1]);
                                        } catch (NumberFormatException nfe) {
                                            // ignore, use default
                                        }
                                    }
                                    addressList.add(new InetSocketAddress(hostname, port));
                                    isHighAvailabilityCluster = true;
                                }
                            }
                        }
                    } else {
                        // Non-HA case: use exactly what the user typed into the dialog.
                        String hostname = wUrl.getText();
                        int port = 0;
                        try {
                            port = Integer.parseInt(wPort.getText());
                        } catch (NumberFormatException nfe) {
                            // ignore, use default
                        }
                        addressList.add(new InetSocketAddress(hostname, port));
                        isHighAvailabilityCluster = false;
                    }

                    // Probe each candidate with a plain socket until one connects;
                    // failures are accumulated into connectMessage for the error dialog.
                    boolean success = false;
                    StringBuffer connectMessage = new StringBuffer();
                    for (int i = 0; !success && i < addressList.size(); i++) {
                        InetSocketAddress address = addressList.get(i);
                        connectMessage.append("Connect ");
                        // getHostName() may trigger a reverse DNS lookup here.
                        connectMessage.append(address.getHostName());
                        connectMessage.append(NAMENODE_HOSTNAME_PORT_DELIMITER);
                        connectMessage.append(address.getPort());
                        Socket testHdfsSocket = new Socket(address.getHostName(), address.getPort());
                        try {
                            testHdfsSocket.getOutputStream();
                            testHdfsSocket.close();
                            success = true;
                            connectedHostname = address.getHostName();
                            connectedPortString = Integer.toString(address.getPort());
                            connectMessage.append("=success!");
                        } catch (IOException ioe) {
                            // Add errors to message string, but otherwise ignore, we'll check for success later
                            connectMessage.append("=failed, ");
                            connectMessage.append(ioe.getMessage());
                            connectMessage.append(System.getProperty("line.separator"));
                        }
                    }
                    if (!success) {
                        throw new IOException(connectMessage.toString());
                    }
                } else {
                    throw new Exception("No active Hadoop Configuration specified!");
                }

            } catch (Throwable t) {
                showMessageAndLog(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.error"),
                        BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.Connection.error"),
                        t.getMessage());
                return;
            }

            // Persist the working connection settings for next time.
            Props.getInstance().setCustomParameter("HadoopVfsFileChooserDialog.host", wUrl.getText());
            Props.getInstance().setCustomParameter("HadoopVfsFileChooserDialog.port", connectedPortString);
            Props.getInstance().setCustomParameter("HadoopVfsFileChooserDialog.user", wUserID.getText());
            Props.getInstance().setCustomParameter("HadoopVfsFileChooserDialog.password", wPassword.getText());

            FileObject root = rootFile;
            try {
                root = KettleVFS.getFileObject(buildHadoopFileSystemUrlString());
            } catch (KettleFileException e1) {
                // Search for "unsupported scheme" message. The actual string has parameters that we won't be able to match,
                // so build a string with
                // known (dummy) params, then split to get the beginning string, then compare against the current exception's
                // message.
                final String unsupportedSchemeMessage = BaseMessages.getString(HadoopConfiguration.class,
                        "Error.UnsupportedSchemeForConfiguration", "@!@", "!@!");
                final String unsupportedSchemeMessagePrefix = unsupportedSchemeMessage.split("@!@")[0];
                final String message = e1.getMessage();
                if (message.contains(unsupportedSchemeMessagePrefix)) {
                    try {
                        HadoopConfiguration hadoopConfig = getHadoopConfig();
                        String hadoopConfigName = (hadoopConfig == null) ? "Unknown" : hadoopConfig.getName();
                        showMessageAndLog(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.error"),
                                BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.Connection.schemeError",
                                        hadoopConfigName),
                                message);
                    } catch (ConfigurationException ce) {
                        showMessageAndLog(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.error"),
                                BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.Connection.error"),
                                ce.getMessage());
                    }
                } else {
                    showMessageAndLog(BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.error"),
                            BaseMessages.getString(PKG, "HadoopVfsFileChooserDialog.Connection.error"),
                            e1.getMessage());
                }
                return;
            }
            vfsFileChooserDialog.setSelectedFile(root);
            vfsFileChooserDialog.setRootFile(root);
            rootFile = root;
        }
    });

    // set the tab order
    textFieldPanel.setTabList(new Control[] { wUrl, wPort, wUserID, wPassword, wConnectionButton });
}

From source file:org.jenkinsci.plugins.GithubSecurityRealm.java

/**
 * Resolves the HTTP proxy that should be used when connecting to the URI of the
 * given request, or {@code null} when no proxy applies (direct connection,
 * no Jenkins proxy configured, or an unsupported proxy type such as SOCKS).
 */
private HttpHost getProxy(HttpUriRequest method) throws URIException {
    ProxyConfiguration proxyConfig = Jenkins.getInstance().proxy;
    if (proxyConfig == null) {
        return null; // defensive check
    }

    Proxy resolved = proxyConfig.createProxy(method.getURI().getHost());
    if (resolved.type() == Proxy.Type.HTTP) {
        InetSocketAddress endpoint = (InetSocketAddress) resolved.address();
        return new HttpHost(endpoint.getHostName(), endpoint.getPort());
    }
    // DIRECT means no proxy; SOCKS (and anything else) is not supported yet.
    return null;
}

From source file:org.ballerinalang.net.http.HttpUtil.java

/**
 * Populates the HTTP caller with connection information.
 *
 * @param httpCaller   Represents the HTTP caller
 * @param inboundMsg   Represents the carbon message
 * @param httpResource Represents the Http Resource
 * @param config       Represents the service endpoint configuration
 */
public static void enrichHttpCallerWithConnectionInfo(BMap<String, BValue> httpCaller,
        HttpCarbonMessage inboundMsg, HttpResource httpResource, Struct config) {
    BMap<String, BValue> remote = BLangConnectorSPIUtil.createBStruct(
            httpResource.getBalResource().getResourceInfo().getPackageInfo().getProgramFile(),
            PROTOCOL_PACKAGE_HTTP, HttpConstants.REMOTE);
    BMap<String, BValue> local = BLangConnectorSPIUtil.createBStruct(
            httpResource.getBalResource().getResourceInfo().getPackageInfo().getProgramFile(),
            PROTOCOL_PACKAGE_HTTP, HttpConstants.LOCAL);

    // Remote and local structs are filled by identical logic; only the message
    // property and the target field names differ.
    putAddressInfo(remote, inboundMsg.getProperty(HttpConstants.REMOTE_ADDRESS),
            HttpConstants.REMOTE_HOST_FIELD, HttpConstants.REMOTE_PORT_FIELD);
    httpCaller.put(HttpConstants.REMOTE_STRUCT_FIELD, remote);

    putAddressInfo(local, inboundMsg.getProperty(HttpConstants.LOCAL_ADDRESS),
            HttpConstants.LOCAL_HOST_FIELD, HttpConstants.LOCAL_PORT_FIELD);
    httpCaller.put(HttpConstants.LOCAL_STRUCT_INDEX, local);

    httpCaller.put(HttpConstants.SERVICE_ENDPOINT_PROTOCOL_FIELD,
            new BString((String) inboundMsg.getProperty(HttpConstants.PROTOCOL)));
    httpCaller.put(HttpConstants.SERVICE_ENDPOINT_CONFIG_FIELD, (BMap<String, BValue>) config.getVMValue());
}

/**
 * Copies host and port out of {@code socketAddress} into the given struct when
 * it is an {@link InetSocketAddress}; otherwise leaves the struct untouched.
 * Note: getHostName() may trigger a reverse DNS lookup.
 */
private static void putAddressInfo(BMap<String, BValue> struct, Object socketAddress,
        String hostField, String portField) {
    if (socketAddress instanceof InetSocketAddress) {
        InetSocketAddress inetSocketAddress = (InetSocketAddress) socketAddress;
        struct.put(hostField, new BString(inetSocketAddress.getHostName()));
        struct.put(portField, new BInteger(inetSocketAddress.getPort()));
    }
}

From source file:org.apache.hadoop.mapred.ProxyJobTracker.java

/**
 * Starts the Corona proxy job tracker: an RPC server, an embedded HTTP info
 * server (with job-history attributes), metrics, several background cleanup
 * threads, and a Thrift server. Order matters — each server is started before
 * the next subsystem that depends on it is initialized.
 *
 * @param conf Corona configuration to run with
 * @throws IOException if any server or the file system cannot be initialized
 */
public ProxyJobTracker(CoronaConf conf) throws IOException {
    this.conf = conf;
    fs = FileSystem.get(conf);
    // Resolve the HTTP info address; port 0 means "pick any free port".
    String infoAddr = conf.get("mapred.job.tracker.corona.proxyaddr", "0.0.0.0:0");
    InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    String infoBindAddress = infoSocAddr.getHostName();
    int port = infoSocAddr.getPort();
    LOCALMACHINE = infoBindAddress;
    startTime = getClock().getTime();

    // RPC server comes up first so clients can reach the tracker.
    CoronaConf coronaConf = new CoronaConf(conf);
    InetSocketAddress rpcSockAddr = NetUtils.createSocketAddr(coronaConf.getProxyJobTrackerAddress());
    rpcServer = RPC.getServer(this, rpcSockAddr.getHostName(), rpcSockAddr.getPort(),
            conf.getInt("corona.proxy.job.tracker.handler.count", 10), false, conf);
    rpcServer.start();

    LOG.info("ProxyJobTracker RPC Server up at " + rpcServer.getListenerAddress());

    infoServer = new HttpServer("proxyjt", infoBindAddress, port, port == 0, conf);
    infoServer.setAttribute("proxy.job.tracker", this);
    infoServer.setAttribute("conf", conf);
    infoServer.addServlet("proxy", "/proxy", ProxyJobTrackerServlet.class);
    // initialize history parameters.
    JobConf jobConf = new JobConf(conf);
    boolean historyInitialized = JobHistory.init(this, jobConf, this.LOCALMACHINE, this.startTime);
    if (historyInitialized) {
        JobHistory.initDone(jobConf, fs);
        String historyLogDir = JobHistory.getCompletedJobHistoryLocation().toString();
        FileSystem historyFS = new Path(historyLogDir).getFileSystem(conf);
        infoServer.setAttribute("historyLogDir", historyLogDir);
        infoServer.setAttribute("fileSys", historyFS);
    }
    infoServer.start();
    LOCALPORT = infoServer.getPort();

    context = MetricsUtil.getContext("mapred");
    metricsRecord = MetricsUtil.createRecord(context, "proxyjobtracker");
    context.registerUpdater(this);

    // Background thread that evicts unused files from the distributed cache.
    expireUnusedFilesInCache = new ExpireUnusedFilesInCache(conf, getClock(), new Path(getSystemDir()));
    expireUnusedFilesInCache.setName("Cache File cleanup thread");
    expireUnusedFilesInCache.start();

    // 10 days
    long clearJobFileThreshold = conf.getLong("mapred.job.file.expirethreshold", 864000000L);

    long clearJobFileInterval = conf.getLong("mapred.job.file.checkinterval", 86400000L);

    expireUnusedJobFiles = new ExpireUnusedJobFiles(getClock(), conf, new Path(getSystemDir()),
            UNUSED_JOBFILE_PATTERN, clearJobFileThreshold, clearJobFileInterval);
    expireUnusedJobFiles.setName("Job File Cleanup Thread");
    expireUnusedJobFiles.start();

    // Same cleanup mechanism, pointed at the session history log directory.
    long clearJobHistoryThreshold = conf.getLong("mapred.job.history.expirethreshold", 864000000L);

    long clearJobHistoryInterval = conf.getLong("mapred.job.history.checkinterval", 86400000L);

    expireUnusedJobHistory = new ExpireUnusedJobFiles(getClock(), conf, new Path(conf.getSessionsLogDir()),
            UNUSED_JOBHISTORY_PATTERN, clearJobHistoryThreshold, clearJobHistoryInterval);

    expireUnusedJobHistory.setName("Job History Cleanup Thread");
    expireUnusedJobHistory.start();
    sessionHistoryManager = new SessionHistoryManager();
    sessionHistoryManager.setConf(conf);

    // Thrift server is brought up last, on the configured thrift address port.
    String target = conf.getProxyJobTrackerThriftAddress();
    InetSocketAddress addr = NetUtils.createSocketAddr(target);
    LOG.info("Trying to start the Thrift Server at: " + target);
    ServerSocket serverSocket = new ServerSocket(addr.getPort());
    thriftServer = TFactoryBasedThreadPoolServer
            .createNewServer(new CoronaProxyJobTrackerService.Processor(this), serverSocket, 5000);
    thriftServerThread = new TServerThread(thriftServer);
    thriftServerThread.start();
    LOG.info("Thrift server started on: " + target);
}

From source file:com.chinamobile.bcbsp.http.HttpServer.java

/**
 * Configure an SSL listener on the server.
 *
 * @param addr     address to listen on
 * @param keystore location of the keystore
 * @param storPass password for the keystore
 * @param keyPass  password for the key
 * @throws IOException if the web server has already been started, since
 *         connectors cannot be added to a running server
 * @deprecated Use
 *        {@link #addSslListener(InetSocketAddress, Configuration, boolean)}
 */
@Deprecated
public void addSslListener(InetSocketAddress addr, String keystore, String storPass, String keyPass)
        throws IOException {
    if (webServer.isStarted()) {
        throw new IOException("Failed to add ssl listener");
    }
    SslSocketConnector connector = new SslSocketConnector();
    // Keystore configuration first, then the bind endpoint.
    connector.setKeystore(keystore);
    connector.setPassword(storPass);
    connector.setKeyPassword(keyPass);
    connector.setHost(addr.getHostName());
    connector.setPort(addr.getPort());
    webServer.addConnector(connector);
}