Example usage for java.net InetSocketAddress getPort

Introduction

On this page you can find example usages of java.net.InetSocketAddress.getPort().

Prototype

public final int getPort() 

Document

Gets the port number.
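
Before the project excerpts under Usage below, here is a minimal, self-contained sketch (not taken from any of those projects; the host name and port values are purely illustrative) showing what getPort() reports for an address created with an explicit port and for a wildcard address created with port 0:

import java.net.InetSocketAddress;

public class GetPortExample {
    public static void main(String[] args) {
        // Address created with an explicit port: getPort() returns that port.
        InetSocketAddress addr = new InetSocketAddress("example.com", 8080);
        System.out.println(addr.getHostName() + " -> port " + addr.getPort()); // 8080

        // Wildcard address with port 0: getPort() returns 0, the conventional
        // way to request an ephemeral port when the address is later bound.
        InetSocketAddress ephemeral = new InetSocketAddress(0);
        System.out.println("wildcard -> port " + ephemeral.getPort()); // 0
    }
}

Most of the excerpts below follow the same pattern: obtain a socket address from a server, a configuration, or a peer, then combine getAddress() or getHostName() with getPort() to build a host:port string or to open a connection.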

Usage

From source file:org.apache.hadoop.raid.RaidNode.java

private void initialize(Configuration conf)
        throws IOException, SAXException, InterruptedException, RaidConfigurationException,
        ClassNotFoundException, ParserConfigurationException, URISyntaxException, JSONException {
    this.startTime = RaidNode.now();
    this.conf = conf;
    modTimePeriod = conf.getLong(RAID_MOD_TIME_PERIOD_KEY, RAID_MOD_TIME_PERIOD_DEFAULT);
    LOG.info("modTimePeriod: " + modTimePeriod);
    InetSocketAddress socAddr = RaidNode.getAddress(conf);
    int handlerCount = conf.getInt("fs.raidnode.handler.count", 10);
    addTmpJars(this.conf);
    // clean up temporary directory
    cleanUpTempDirectory(conf);

    // read in the configuration
    configMgr = new ConfigManager(conf);

    // create rpc server
    this.server = RPC.getServer(this, socAddr.getAddress().getHostAddress(), socAddr.getPort(), handlerCount,
            false, conf);

    // create checksum store if it does not exist
    RaidNode.createChecksumStore(conf, true);

    // create stripe store if it does not exist
    RaidNode.createStripeStore(conf, true, FileSystem.get(conf));

    // The rpc-server port can be ephemeral... ensure we have the correct info
    this.serverAddress = this.server.getListenerAddress();
    LOG.info("RaidNode up at: " + this.serverAddress);
    // Instantiate the metrics singleton.
    RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);

    this.server.start(); // start RPC server

    // Create a block integrity monitor and start its thread(s)
    this.blockIntegrityMonitor = BlockIntegrityMonitor.createBlockIntegrityMonitor(conf);

    boolean useBlockFixer = !conf.getBoolean(RAID_DISABLE_CORRUPT_BLOCK_FIXER_KEY, false);
    boolean useBlockCopier = !conf.getBoolean(RAID_DISABLE_DECOMMISSIONING_BLOCK_COPIER_KEY, true);
    boolean useCorruptFileCounter = !conf.getBoolean(RAID_DISABLE_CORRUPTFILE_COUNTER_KEY, false);

    Runnable fixer = blockIntegrityMonitor.getCorruptionMonitor();
    if (useBlockFixer && (fixer != null)) {
        this.blockFixerThread = new Daemon(fixer);
        this.blockFixerThread.setName("Block Fixer");
        this.blockFixerThread.start();
    }

    Runnable copier = blockIntegrityMonitor.getDecommissioningMonitor();
    if (useBlockCopier && (copier != null)) {
        this.blockCopierThread = new Daemon(copier);
        this.blockCopierThread.setName("Block Copier");
        this.blockCopierThread.start();
    }

    Runnable counter = blockIntegrityMonitor.getCorruptFileCounter();
    if (useCorruptFileCounter && counter != null) {
        this.corruptFileCounterThread = new Daemon(counter);
        this.corruptFileCounterThread.setName("Corrupt File Counter");
        this.corruptFileCounterThread.start();
    }

    // start the daemon thread to fire policies appropriately
    RaidNode.triggerMonitorSleepTime = conf.getLong(TRIGGER_MONITOR_SLEEP_TIME_KEY, SLEEP_TIME);
    RaidNode.underRedundantFilesProcessorSleepTime = conf
            .getLong(UNDER_REDUNDANT_FILES_PROCESSOR_SLEEP_TIME_KEY, SLEEP_TIME);
    this.triggerMonitor = new TriggerMonitor();
    this.triggerThread = new Daemon(this.triggerMonitor);
    this.triggerThread.setName("Trigger Thread");
    this.triggerThread.start();

    this.urfProcessor = new UnderRedundantFilesProcessor(conf);
    this.urfThread = new Daemon(this.urfProcessor);
    this.urfThread.setName("UnderRedundantFilesProcessor Thread");
    this.urfThread.start();

    // start the thread that monitors and moves blocks
    this.placementMonitor = new PlacementMonitor(conf);
    this.placementMonitor.start();

    // start the thread that deletes obsolete parity files
    this.purgeMonitor = new PurgeMonitor(conf, placementMonitor, this);
    this.purgeThread = new Daemon(purgeMonitor);
    this.purgeThread.setName("Purge Thread");
    this.purgeThread.start();

    // start the thread that creates HAR files
    this.harThread = new Daemon(new HarMonitor());
    this.harThread.setName("HAR Thread");
    this.harThread.start();

    // start the thread that collects statistics
    this.statsCollector = new StatisticsCollector(this, configMgr, conf);
    this.statsCollectorThread = new Daemon(statsCollector);
    this.statsCollectorThread.setName("Stats Collector");
    this.statsCollectorThread.start();

    this.directoryTraversalShuffle = conf.getBoolean(RAID_DIRECTORYTRAVERSAL_SHUFFLE, true);
    this.directoryTraversalThreads = conf.getInt(RAID_DIRECTORYTRAVERSAL_THREADS, 4);

    startHttpServer();
    this.registerMBean();

    initialized = true;
}

From source file:de.dal33t.powerfolder.clientserver.ServerClient.java

/**
 * @return the string representing the server address
 */
public String getServerString() {
    String addrStr;
    if (server != null) {
        if (server.isMySelf()) {
            addrStr = "myself";
        } else {
            InetSocketAddress addr = server.getReconnectAddress();
            if (addr != null) {
                if (addr.getAddress() != null) {
                    addrStr = NetworkUtil.getHostAddressNoResolve(addr.getAddress());
                } else {
                    addrStr = addr.getHostName();
                }
            } else {
                addrStr = "";
            }

            if (addr != null && addr.getPort() != ConnectionListener.DEFAULT_PORT) {
                addrStr += ":" + addr.getPort();
            }
        }
    } else {
        addrStr = "";
    }
    if (hasWebURL()) {
        return getWebURL();
    } else if (StringUtils.isNotBlank(addrStr)) {
        return "pf://" + addrStr;
    } else {
        return "n/a";
    }

}

From source file:org.apache.hadoop.hive.llap.LlapBaseInputFormat.java

private SubmitWorkRequestProto constructSubmitWorkRequestProto(SubmitWorkInfo submitWorkInfo, int taskNum,
        int attemptNum, InetSocketAddress address, Token<JobTokenIdentifier> token, byte[] fragmentBytes,
        byte[] fragmentBytesSignature, JobConf job) throws IOException {
    ApplicationId appId = submitWorkInfo.getFakeAppId();

    // This works, assuming the executor is running within YARN.
    String user = System.getenv(ApplicationConstants.Environment.USER.name());
    LOG.info("Setting user in submitWorkRequest to: " + user);

    ContainerId containerId = ContainerId.newInstance(ApplicationAttemptId.newInstance(appId, attemptNum),
            taskNum);

    // Credentials can change across DAGs. Ideally construct only once per DAG.
    Credentials credentials = new Credentials();
    TokenCache.setSessionToken(token, credentials);
    ByteBuffer credentialsBinary = serializeCredentials(credentials);

    FragmentRuntimeInfo.Builder runtimeInfo = FragmentRuntimeInfo.newBuilder();
    runtimeInfo.setCurrentAttemptStartTime(System.currentTimeMillis());
    runtimeInfo.setWithinDagPriority(0);
    runtimeInfo.setDagStartTime(submitWorkInfo.getCreationTime());
    runtimeInfo.setFirstAttemptStartTime(submitWorkInfo.getCreationTime());
    runtimeInfo.setNumSelfAndUpstreamTasks(submitWorkInfo.getVertexParallelism());
    runtimeInfo.setNumSelfAndUpstreamCompletedTasks(0);

    SubmitWorkRequestProto.Builder builder = SubmitWorkRequestProto.newBuilder();

    VertexOrBinary.Builder vertexBuilder = VertexOrBinary.newBuilder();
    vertexBuilder.setVertexBinary(ByteString.copyFrom(submitWorkInfo.getVertexBinary()));
    if (submitWorkInfo.getVertexSignature() != null) {
        // Unsecure case?
        builder.setWorkSpecSignature(ByteString.copyFrom(submitWorkInfo.getVertexSignature()));
    }
    builder.setWorkSpec(vertexBuilder.build());
    builder.setFragmentNumber(taskNum);
    builder.setAttemptNumber(attemptNum);
    builder.setContainerIdString(containerId.toString());
    builder.setAmHost(LlapUtil.getAmHostNameFromAddress(address, job));
    builder.setAmPort(address.getPort());
    builder.setCredentialsBinary(ByteString.copyFrom(credentialsBinary));
    builder.setFragmentRuntimeInfo(runtimeInfo.build());
    builder.setInitialEventBytes(ByteString.copyFrom(fragmentBytes));
    if (fragmentBytesSignature != null) {
        builder.setInitialEventSignature(ByteString.copyFrom(fragmentBytesSignature));
    }
    return builder.build();
}

From source file:com.mellanox.r4h.MiniDFSCluster.java

/** Wait until the given namenode gets registration from all the datanodes */
public void waitActive(int nnIndex) throws IOException {
    if (nameNodes.length == 0 || nameNodes[nnIndex] == null || nameNodes[nnIndex].nameNode == null) {
        return;
    }
    InetSocketAddress addr = nameNodes[nnIndex].nameNode.getServiceRpcAddress();
    assert addr.getPort() != 0;
    DFSClient client = new DFSClient(addr, conf);

    // ensure all datanodes have registered and sent heartbeat to the namenode
    while (shouldWait(client.datanodeReport(DatanodeReportType.LIVE), addr)) {
        try {
            LOG.info("Waiting for cluster to become active");
            Thread.sleep(100);
        } catch (InterruptedException e) {
        }
    }

    client.close();
}

From source file:org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager.java

/**
 * For generating datanode reports
 */
public List<DatanodeDescriptor> getDatanodeListForReport(final DatanodeReportType type) {
    final boolean listLiveNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.LIVE;
    final boolean listDeadNodes = type == DatanodeReportType.ALL || type == DatanodeReportType.DEAD;
    final boolean listDecommissioningNodes = type == DatanodeReportType.ALL
            || type == DatanodeReportType.DECOMMISSIONING;

    ArrayList<DatanodeDescriptor> nodes;
    final HostFileManager.HostSet foundNodes = new HostFileManager.HostSet();
    final HostFileManager.HostSet includedNodes = hostFileManager.getIncludes();
    final HostFileManager.HostSet excludedNodes = hostFileManager.getExcludes();

    synchronized (datanodeMap) {
        nodes = new ArrayList<DatanodeDescriptor>(datanodeMap.size());
        for (DatanodeDescriptor dn : datanodeMap.values()) {
            final boolean isDead = isDatanodeDead(dn);
            final boolean isDecommissioning = dn.isDecommissionInProgress();
            if ((listLiveNodes && !isDead) || (listDeadNodes && isDead)
                    || (listDecommissioningNodes && isDecommissioning)) {
                nodes.add(dn);
            }
            foundNodes.add(HostFileManager.resolvedAddressFromDatanodeID(dn));
        }
    }

    if (listDeadNodes) {
        for (InetSocketAddress addr : includedNodes) {
            if (foundNodes.matchedBy(addr) || excludedNodes.match(addr)) {
                continue;
            }
            // The remaining nodes are ones that are referenced by the hosts
            // files but that we do not know about, i.e. that we have never
            // heard from, e.g. an entry that is no longer part of the cluster
            // or a bogus entry that was given in the hosts files.
            //
            // If the host file entry specified the xferPort, we use that.
            // Otherwise, we guess that it is the default xfer port.
            // We can't ask the DataNode what it had configured, because it's
            // dead.
            DatanodeDescriptor dn = new DatanodeDescriptor(this.storageMap,
                    new DatanodeID(addr.getAddress().getHostAddress(), addr.getHostName(), "",
                            addr.getPort() == 0 ? defaultXferPort : addr.getPort(), defaultInfoPort,
                            defaultInfoSecurePort, defaultIpcPort));
            dn.setLastUpdate(0); // Consider this node dead for reporting
            nodes.add(dn);
        }
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("getDatanodeListForReport with " + "includedNodes = " + hostFileManager.getIncludes()
                + ", excludedNodes = " + hostFileManager.getExcludes() + ", foundNodes = " + foundNodes
                + ", nodes = " + nodes);
    }
    return nodes;
}

From source file:org.apache.hadoop.hdfs.server.namenode.TestCheckpoint.java

/**
 * Starts two namenodes and two secondary namenodes, verifies that secondary
 * namenodes are configured correctly to talk to their respective namenodes
 * and can do the checkpoint.
 * 
 * @throws IOException
 */
@Test
public void testMultipleSecondaryNamenodes() throws IOException {
    Configuration conf = new HdfsConfiguration();
    String nameserviceId1 = "ns1";
    String nameserviceId2 = "ns2";
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1 + "," + nameserviceId2);
    MiniDFSCluster cluster = null;
    SecondaryNameNode secondary1 = null;
    SecondaryNameNode secondary2 = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf)
                .nnTopology(MiniDFSNNTopology.simpleFederatedTopology(conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
                .build();
        Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
        Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
        InetSocketAddress nn1RpcAddress = cluster.getNameNode(0).getNameNodeAddress();
        InetSocketAddress nn2RpcAddress = cluster.getNameNode(1).getNameNodeAddress();
        String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
        String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();

        // Set the Service Rpc address to empty to make sure the node specific
        // setting works
        snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
        snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");

        // Set the nameserviceIds
        snConf1.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
                nn1);
        snConf2.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
                nn2);

        secondary1 = startSecondaryNameNode(snConf1);
        secondary2 = startSecondaryNameNode(snConf2);

        // make sure the two secondary namenodes are talking to correct namenodes.
        assertEquals(secondary1.getNameNodeAddress().getPort(), nn1RpcAddress.getPort());
        assertEquals(secondary2.getNameNodeAddress().getPort(), nn2RpcAddress.getPort());
        assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2.getNameNodeAddress().getPort());

        // both should checkpoint.
        secondary1.doCheckpoint();
        secondary2.doCheckpoint();
    } finally {
        cleanup(secondary1);
        secondary1 = null;
        cleanup(secondary2);
        secondary2 = null;
        cleanup(cluster);
        cluster = null;
    }
}

From source file:com.mellanox.r4h.MiniDFSCluster.java

/**
 * Restart a datanode, on the same port if requested
 *
 * @param dnprop
 *            the datanode to restart
 * @param keepPort
 *            whether to use the same port
 * @return true if restarting is successful
 * @throws IOException
 */
public synchronized boolean restartDataNode(DataNodeProperties dnprop, boolean keepPort) throws IOException {
    Configuration conf = dnprop.conf;
    String[] args = dnprop.dnArgs;
    SecureResources secureResources = dnprop.secureResources;
    Configuration newconf = new HdfsConfiguration(conf); // save cloned config
    if (keepPort) {
        InetSocketAddress addr = dnprop.datanode.getXferAddress();
        conf.set(DFS_DATANODE_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + addr.getPort());
        conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, addr.getAddress().getHostAddress() + ":" + dnprop.ipcPort);
    }
    DataNode newDn = DataNode.createDataNode(args, conf, secureResources);
    dataNodes.add(new DataNodeProperties(newDn, newconf, args, secureResources, newDn.getIpcPort()));
    numDataNodes++;
    return true;
}

From source file:org.apache.hadoop.gateway.GatewayBasicFuncTest.java

@Test
public void testBasicOutboundHeaderUseCase() throws IOException {
    String root = "/tmp/GatewayBasicFuncTest/testBasicOutboundHeaderUseCase";
    String username = "hdfs";
    String password = "hdfs-password";
    InetSocketAddress gatewayAddress = driver.gateway.getAddresses()[0];

    driver.getMock("WEBHDFS").expect().method("PUT").pathInfo("/v1" + root + "/dir/file")
            .header("Host", driver.getRealAddr("WEBHDFS")).queryParam("op", "CREATE")
            .queryParam("user.name", username).respond().status(HttpStatus.SC_TEMPORARY_REDIRECT)
            .header("Location",
                    driver.getRealUrl("DATANODE") + "/v1" + root + "/dir/file?op=CREATE&user.name=hdfs");
    Response response = given()
            //.log().all()
            .auth().preemptive().basic(username, password).header("X-XSRF-Header", "jksdhfkhdsf")
            .queryParam("op", "CREATE").expect()
            //.log().ifError()
            .statusCode(HttpStatus.SC_TEMPORARY_REDIRECT).when()
            .put(driver.getUrl("WEBHDFS") + "/v1" + root + "/dir/file");
    String location = response.getHeader("Location");
    //System.out.println( location );
    log.debug("Redirect location: " + response.getHeader("Location"));
    if (driver.isUseGateway()) {
        MatcherAssert.assertThat(location,
                startsWith("http://" + gatewayAddress.getHostName() + ":" + gatewayAddress.getPort() + "/"));
        MatcherAssert.assertThat(location, containsString("?_="));
    }
    MatcherAssert.assertThat(location, not(containsString("host=")));
    MatcherAssert.assertThat(location, not(containsString("port=")));
}

From source file:org.apache.hadoop.hdfs.server.namenode.NameNode.java

@SuppressWarnings("deprecation")
private void startHttpServer(final Configuration conf) throws IOException {
    final String infoAddr = NetUtils.getServerAddress(conf, "dfs.info.bindAddress", "dfs.info.port",
            "dfs.http.address");
    final InetSocketAddress infoSocAddr = NetUtils.createSocketAddr(infoAddr);
    if (UserGroupInformation.isSecurityEnabled()) {
        String httpsUser = SecurityUtil.getServerPrincipal(
                conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoSocAddr.getHostName());
        if (httpsUser == null) {
            LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
                    + " not defined in config. Starting http server as "
                    + SecurityUtil.getServerPrincipal(conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
                            serverAddress.getHostName())
                    + ": Kerberized SSL may be not function correctly.");
        } else {
            // Kerberized SSL servers must be run from the host principal...
            LOG.info("Logging in as " + httpsUser + " to start http server.");
            SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
                    DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoSocAddr.getHostName());
        }
    }
    UserGroupInformation ugi = UserGroupInformation.getLoginUser();
    try {
        this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() {
            @Override
            public HttpServer run() throws IOException, InterruptedException {
                String infoHost = infoSocAddr.getHostName();
                int infoPort = infoSocAddr.getPort();
                httpServer = new HttpServer("hdfs", infoHost, infoPort, infoPort == 0, conf,
                        SecurityUtil.getAdminAcls(conf, DFSConfigKeys.DFS_ADMIN)) {
                    {
                        if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
                            //add SPNEGO authentication filter for webhdfs
                            final String name = "SPNEGO";
                            final String classname = AuthFilter.class.getName();
                            final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
                            Map<String, String> params = getAuthFilterParams(conf);
                            defineFilter(webAppContext, name, classname, params, new String[] { pathSpec });
                            LOG.info("Added filter '" + name + "' (class=" + classname + ")");

                            // add webhdfs packages
                            addJerseyResourcePackage(NamenodeWebHdfsMethods.class.getPackage().getName() + ";"
                                    + Param.class.getPackage().getName(), pathSpec);
                        }
                    }

                    private Map<String, String> getAuthFilterParams(Configuration conf) throws IOException {
                        Map<String, String> params = new HashMap<String, String>();
                        String principalInConf = conf
                                .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
                        if (principalInConf != null && !principalInConf.isEmpty()) {
                            params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, SecurityUtil
                                    .getServerPrincipal(principalInConf, serverAddress.getHostName()));
                        }
                        String httpKeytab = conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
                        if (httpKeytab != null && !httpKeytab.isEmpty()) {
                            params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, httpKeytab);
                        }
                        return params;
                    }
                };

                boolean certSSL = conf.getBoolean("dfs.https.enable", false);
                boolean useKrb = UserGroupInformation.isSecurityEnabled();
                if (certSSL || useKrb) {
                    boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
                    InetSocketAddress secInfoSocAddr = NetUtils
                            .createSocketAddr(infoHost + ":" + conf.get("dfs.https.port", infoHost + ":" + 0));
                    Configuration sslConf = new Configuration(false);
                    if (certSSL) {
                        sslConf.addResource(conf.get("dfs.https.server.keystore.resource", "ssl-server.xml"));
                    }
                    httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth, useKrb);
                    // assume same ssl port for all datanodes
                    InetSocketAddress datanodeSslPort = NetUtils
                            .createSocketAddr(conf.get("dfs.datanode.https.address", infoHost + ":" + 50475));
                    httpServer.setAttribute("datanode.https.port", datanodeSslPort.getPort());
                }
                httpServer.setAttribute("name.node", NameNode.this);
                httpServer.setAttribute("name.node.address", getNameNodeAddress());
                httpServer.setAttribute("name.system.image", getFSImage());
                httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
                httpServer.addInternalServlet("getDelegationToken", GetDelegationTokenServlet.PATH_SPEC,
                        GetDelegationTokenServlet.class, true);
                httpServer.addInternalServlet("renewDelegationToken", RenewDelegationTokenServlet.PATH_SPEC,
                        RenewDelegationTokenServlet.class, true);
                httpServer.addInternalServlet("cancelDelegationToken", CancelDelegationTokenServlet.PATH_SPEC,
                        CancelDelegationTokenServlet.class, true);
                httpServer.addInternalServlet("fsck", "/fsck", FsckServlet.class, true);
                httpServer.addInternalServlet("getimage", "/getimage", GetImageServlet.class, true);
                httpServer.addInternalServlet("listPaths", "/listPaths/*", ListPathsServlet.class, false);
                httpServer.addInternalServlet("data", "/data/*", FileDataServlet.class, false);
                httpServer.addInternalServlet("checksum", "/fileChecksum/*",
                        FileChecksumServlets.RedirectServlet.class, false);
                httpServer.addInternalServlet("contentSummary", "/contentSummary/*",
                        ContentSummaryServlet.class, false);
                httpServer.start();

                // The web-server port can be ephemeral... ensure we have the correct info
                infoPort = httpServer.getPort();
                httpAddress = new InetSocketAddress(infoHost, infoPort);
                conf.set("dfs.http.address", infoHost + ":" + infoPort);
                LOG.info("Web-server up at: " + infoHost + ":" + infoPort);
                return httpServer;
            }
        });
    } catch (InterruptedException e) {
        throw new IOException(e);
    } finally {
        if (UserGroupInformation.isSecurityEnabled()
                && conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) {
            // Go back to being the correct Namenode principal
            LOG.info("Logging back in as "
                    + SecurityUtil.getServerPrincipal(conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
                            serverAddress.getHostName())
                    + " following http server start.");
            SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
                    DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY, serverAddress.getHostName());
        }
    }
}

From source file:edu.umass.cs.reconfiguration.SQLReconfiguratorDB.java

/**
 * Helper function for getRemoteCheckpoint above that actually reads the
 * checkpoint bytes from the socket and writes them to a local file.
 * 
 * @param rcGroupName
 * @param sockAddr
 * @param remoteFilename
 * @param fileSize
 * @return
 */
private String getRemoteCheckpoint(String rcGroupName, InetSocketAddress sockAddr, String remoteFilename,
        long fileSize) {
    synchronized (this.fileSystemLock) {
        String request = remoteFilename + "\n";
        Socket sock = null;
        FileOutputStream fos = null;
        String localCPFilename = null;
        try {
            sock = new Socket(sockAddr.getAddress(), sockAddr.getPort());
            sock.getOutputStream().write(request.getBytes(CHARSET));
            InputStream inStream = (sock.getInputStream());
            if (!this.createCheckpointFile(localCPFilename = this.getCheckpointFile(rcGroupName)))
                return null;
            fos = new FileOutputStream(new File(localCPFilename));
            byte[] buf = new byte[1024];
            int nread = 0;
            int nTotalRead = 0;
            // read from sock, write to file
            while ((nread = inStream.read(buf)) >= 0) {
                /* Need to ensure that the read won't block forever if the
                 * remote endpoint crashes ungracefully and there is no
                 * exception triggered here. But this method itself is
                 * currently unused. */
                nTotalRead += nread;
                fos.write(buf, 0, nread);
            }
            // check exact expected file size
            if (nTotalRead != fileSize)
                localCPFilename = null;
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (fos != null)
                    fos.close();
                if (sock != null)
                    sock.close();
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
        return localCPFilename;
    }
}