Example usage for org.apache.hadoop.net NetUtils getConnectAddress

List of usage examples for org.apache.hadoop.net NetUtils getConnectAddress

Introduction

On this page you can find usage examples for org.apache.hadoop.net NetUtils getConnectAddress.

Prototype

public static InetSocketAddress getConnectAddress(InetSocketAddress addr) 

Document

Returns an InetSocketAddress that a client can use to connect to the given listening address.
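
Before the full examples, here is a minimal, self-contained sketch of the call (not taken from the sources below; the class name and port are illustrative, and hadoop-common is assumed to be on the classpath). A server binds to the wildcard address, and getConnectAddress substitutes the local host's address so a client has something it can actually connect to. NetUtils also provides an overload that accepts an org.apache.hadoop.ipc.Server directly, which several of the examples below use.

import java.net.InetSocketAddress;

import org.apache.hadoop.net.NetUtils;

public class GetConnectAddressExample {
    public static void main(String[] args) {
        // A listener bound to the wildcard address; the port is arbitrary.
        InetSocketAddress listenAddr = new InetSocketAddress("0.0.0.0", 12345);

        // getConnectAddress replaces the wildcard host with the local host's
        // address, yielding an address a client can connect to.
        InetSocketAddress connectAddr = NetUtils.getConnectAddress(listenAddr);

        System.out.println("Listening at " + listenAddr + ", connect via " + connectAddr);
    }
}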

Usage

From source file:com.datatorrent.stram.engine.StreamingContainer.java

License:Apache License

@SuppressWarnings("unchecked")
public void setup(StreamingContainerContext ctx) {
    containerContext = ctx;

    /* add a request factory local to this container */
    this.requestFactory = new RequestFactory();
    ctx.attributes.put(ContainerContext.REQUEST_FACTORY, requestFactory);

    heartbeatIntervalMillis = ctx.getValue(Context.DAGContext.HEARTBEAT_INTERVAL_MILLIS);
    firstWindowMillis = ctx.startWindowMillis;
    windowWidthMillis = ctx.getValue(Context.DAGContext.STREAMING_WINDOW_SIZE_MILLIS);
    checkpointWindowCount = ctx.getValue(Context.DAGContext.CHECKPOINT_WINDOW_COUNT);

    fastPublisherSubscriber = ctx.getValue(LogicalPlan.FAST_PUBLISHER_SUBSCRIBER);

    Map<Class<?>, Class<? extends StringCodec<?>>> codecs = ctx.getValue(Context.DAGContext.STRING_CODECS);
    StringCodecs.loadConverters(codecs);

    try {
        if (ctx.deployBufferServer) {
            eventloop.start();

            int bufferServerRAM = ctx.getValue(ContainerContext.BUFFER_SERVER_MB);
            logger.debug("buffer server memory {}", bufferServerRAM);
            int blockCount;
            int blocksize;
            if (bufferServerRAM < ContainerContext.BUFFER_SERVER_MB.defaultValue) {
                blockCount = 8;
                blocksize = bufferServerRAM / blockCount;
                if (blocksize < 1) {
                    blocksize = 1;
                }
            } else {
                blocksize = 64;
                blockCount = bufferServerRAM / blocksize;
            }
            // start buffer server, if it was not set externally
            bufferServer = new Server(0, blocksize * 1024 * 1024, blockCount);
            bufferServer.setAuthToken(ctx.getValue(StreamingContainerContext.BUFFER_SERVER_TOKEN));
            if (ctx.getValue(Context.DAGContext.BUFFER_SPOOLING)) {
                bufferServer.setSpoolStorage(new DiskStorage());
            }
            SocketAddress bindAddr = bufferServer.run(eventloop);
            logger.debug("Buffer server started: {}", bindAddr);
            this.bufferServerAddress = NetUtils.getConnectAddress(((InetSocketAddress) bindAddr));
        }
    } catch (IOException ex) {
        logger.warn("deploy request failed due to {}", ex);
        throw new IllegalStateException("Failed to deploy buffer server", ex);
    }

    for (Class<?> clazz : ContainerEvent.CONTAINER_EVENTS_LISTENERS) {
        try {
            Object newInstance = clazz.newInstance();
            singletons.put(clazz.getName(), newInstance);

            if (newInstance instanceof Component) {
                components.add((Component<ContainerContext>) newInstance);
            }

            eventBus.subscribe(newInstance);
        } catch (InstantiationException ex) {
            logger.warn("Container Event Listener Instantiation", ex);
        } catch (IllegalAccessException ex) {
            logger.warn("Container Event Listener Instantiation", ex);
        }
    }

    operateListeners(ctx, true);
}

From source file:com.datatorrent.stram.StramRecoveryTest.java

License:Apache License

@Test
public void testRpcFailover() throws Exception {
    String appPath = testMeta.dir;
    Configuration conf = new Configuration(false);
    final AtomicBoolean timedout = new AtomicBoolean();

    StreamingContainerUmbilicalProtocol impl = MockitoUtil
            .mockProtocol(StreamingContainerUmbilicalProtocol.class);

    Mockito.doAnswer(new org.mockito.stubbing.Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) {
            LOG.debug("got call: " + invocation.getMethod());
            if (!timedout.get()) {
                try {
                    timedout.set(true);
                    Thread.sleep(1000);
                } catch (Exception e) {
                }
                //throw new RuntimeException("fail");
            }
            return null;
        }
    }).when(impl).log("containerId", "timeout");

    Server server = new RPC.Builder(conf).setProtocol(StreamingContainerUmbilicalProtocol.class)
            .setInstance(impl).setBindAddress("0.0.0.0").setPort(0).setNumHandlers(1).setVerbose(false).build();
    server.start();
    InetSocketAddress address = NetUtils.getConnectAddress(server);
    LOG.info("Mock server listening at " + address);

    int rpcTimeoutMillis = 500;
    int retryDelayMillis = 100;
    int retryTimeoutMillis = 500;

    FSRecoveryHandler recoveryHandler = new FSRecoveryHandler(appPath, conf);
    URI uri = RecoverableRpcProxy.toConnectURI(address, rpcTimeoutMillis, retryDelayMillis, retryTimeoutMillis);
    recoveryHandler.writeConnectUri(uri.toString());

    RecoverableRpcProxy rp = new RecoverableRpcProxy(appPath, conf);
    StreamingContainerUmbilicalProtocol protocolProxy = rp.getProxy();
    protocolProxy.log("containerId", "msg");
    // simulate socket read timeout
    try {
        protocolProxy.log("containerId", "timeout");
        Assert.fail("expected socket timeout");
    } catch (java.net.SocketTimeoutException e) {
        // expected
    }
    Assert.assertTrue("timedout", timedout.get());
    rp.close();

    // test success on retry
    timedout.set(false);
    retryTimeoutMillis = 1500;
    uri = RecoverableRpcProxy.toConnectURI(address, rpcTimeoutMillis, retryDelayMillis, retryTimeoutMillis);
    recoveryHandler.writeConnectUri(uri.toString());

    protocolProxy.log("containerId", "timeout");
    Assert.assertTrue("timedout", timedout.get());

    rp.close();
    server.stop();
}

From source file:com.datatorrent.stram.StreamingAppMasterService.java

License:Apache License

@Override
protected void serviceStart() throws Exception {
    super.serviceStart();
    if (UserGroupInformation.isSecurityEnabled()) {
        delegationTokenManager.startThreads();
    }

    // write the connect address for containers to DFS
    InetSocketAddress connectAddress = NetUtils.getConnectAddress(this.heartbeatListener.getAddress());
    URI connectUri = new URI("stram", null, connectAddress.getHostName(), connectAddress.getPort(), null, null,
            null);
    FSRecoveryHandler recoveryHandler = new FSRecoveryHandler(dag.assertAppPath(), getConfig());
    recoveryHandler.writeConnectUri(connectUri.toString());

    // start web service
    try {
        org.mortbay.log.Log.setLog(null);
    } catch (Throwable throwable) {
        // SPOI-2687. As part of Pivotal Certification, we need to catch ClassNotFoundException as Pivotal was using Jetty 7 where as other distros are using Jetty 6.
        // LOG.error("can't set the log to null: ", throwable);
    }

    try {
        Configuration config = getConfig();
        if (UserGroupInformation.isSecurityEnabled()) {
            config = new Configuration(config);
            config.set("hadoop.http.filter.initializers", StramWSFilterInitializer.class.getCanonicalName());
        }
        WebApp webApp = WebApps.$for("stram", StramAppContext.class, appContext, "ws").with(config)
                .start(new StramWebApp(this.dnmgr));
        LOG.info("Started web service at port: " + webApp.port());
        this.appMasterTrackingUrl = NetUtils.getConnectAddress(webApp.getListenerAddress()).getHostName() + ":"
                + webApp.port();
        LOG.info("Setting tracking URL to: " + appMasterTrackingUrl);
    } catch (Exception e) {
        LOG.error("Webapps failed to start. Ignoring for now:", e);
    }
}

From source file:com.datatorrent.stram.StreamingContainerParent.java

License:Apache License

protected void startRpcServer() {
    Configuration conf = getConfig();
    LOG.info("Config: " + conf);
    LOG.info("Listener thread count " + listenerThreadCount);
    try {
        server = new RPC.Builder(conf).setProtocol(StreamingContainerUmbilicalProtocol.class).setInstance(this)
                .setBindAddress("0.0.0.0").setPort(0).setNumHandlers(listenerThreadCount)
                .setSecretManager(tokenSecretManager).setVerbose(false).build();

        // Enable service authorization?
        if (conf.getBoolean(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, false)) {
            //refreshServiceAcls(conf, new MRAMPolicyProvider());
            server.refreshServiceAcl(conf, new PolicyProvider() {

                @Override
                public Service[] getServices() {
                    return (new Service[] { new Service(StreamingContainerUmbilicalProtocol.class.getName(),
                            StreamingContainerUmbilicalProtocol.class) });
                }

            });
        }

        server.start();
        this.address = NetUtils.getConnectAddress(server);
        LOG.info("Container callback server listening at " + this.address);
    } catch (IOException e) {
        throw new YarnRuntimeException(e);
    }
}

From source file:com.intel.hadoopRPCBenchmark.HadoopRPCBenchmarkEngine.java

License:Apache License

private BenchmarkEngineProtocol getRPCProxy() throws Exception {
    final BenchmarkEngineProtocol proxy;
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);

    if (clientUgi != null) {
        proxy = clientUgi.doAs(new PrivilegedExceptionAction<BenchmarkEngineProtocol>() {
            public BenchmarkEngineProtocol run() throws IOException {
                return new BenchmarkEngineProtocolClientSideTranslatorPB(addr, conf);
            }
        });
    } else {
        proxy = new BenchmarkEngineProtocolClientSideTranslatorPB(addr, conf);
    }

    return proxy;
}

From source file:com.newland.bi.bigdata.hdfs.Configuration.java

License:Apache License

/**
 * Set the socket address a client can use to connect for the
 * <code>name</code> property as a <code>host:port</code>.  The wildcard
 * address is replaced with the local host's address.
 * @param name property name.
 * @param addr InetSocketAddress of a listener to store in the given property
 * @return InetSocketAddress for clients to connect
 */
public InetSocketAddress updateConnectAddr(String name, InetSocketAddress addr) {
    final InetSocketAddress connectAddr = NetUtils.getConnectAddress(addr);
    setSocketAddr(name, connectAddr);
    return connectAddr;
}

From source file:edu.illinois.enforcemop.examples.hadoop.TestRPC.java

License:Apache License

@Test
//@Schedule(name = "slowDone", value = "slowrpcDone@SlowRPC->beforeStop@main") 
public void testSlowRpc() throws Exception {
    System.out.println("Testing Slow RPC");
    // create a server with two handlers
    Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, 0, 2, false, conf, null);
    TestProtocol proxy = null;

    try {
        server.start();

        InetSocketAddress addr = NetUtils.getConnectAddress(server);

        // create a client
        proxy = (TestProtocol) RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);

        SlowRPC slowrpc = new SlowRPC(proxy);
        Thread thread = new Thread(slowrpc, "SlowRPC");
        thread.start(); // send a slow RPC, which won't return until two fast pings
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // first fast ping

        // verify that the first RPC is still stuck
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // second fast ping

        // Now the slow ping should be able to be executed

        //Original code :
        //OPWAIT while (!slowrpc.isDone()) {
        //OPWAIT  System.out.println("Waiting for slow RPC to get done.");
        //OPWAIT  try {
        //    Thread.sleep(1000);
        //OPWAIT  } catch (Exception e) {}
        //OPWAIT }

        try {
            Thread.sleep(1000);
        } catch (Exception e) {
        }

    } finally {
        server.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        System.out.println("Down slow rpc testing");
    }
    //Interrupt thread manually
    Thread[] t = new Thread[2];
    Thread.enumerate(t);
    t[1].interrupt();
}

From source file:edu.illinois.imunit.examples.hadoop.TestRPC.java

License:Apache License

@Test
@Schedule(name = "slowDone", value = "slowrpcDone@SlowRPC->beforeStop@main")
public void testSlowRpc() throws Exception {
    System.out.println("Testing Slow RPC");
    // create a server with two handlers
    Server server = RPC.getServer(TestProtocol.class, new TestImpl(), ADDRESS, 0, 2, false, conf, null);
    TestProtocol proxy = null;

    try {
        server.start();

        InetSocketAddress addr = NetUtils.getConnectAddress(server);

        // create a client
        proxy = (TestProtocol) RPC.getProxy(TestProtocol.class, TestProtocol.versionID, addr, conf);

        SlowRPC slowrpc = new SlowRPC(proxy);
        Thread thread = new Thread(slowrpc, "SlowRPC");
        thread.start(); // send a slow RPC, which won't return until two fast pings
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // first fast ping

        // verify that the first RPC is still stuck
        assertTrue("slowDone", !slowrpc.isDone());

        proxy.slowPing(false); // second fast ping

        // Now the slow ping should be able to be executed

        //Original code :
        //OPWAIT while (!slowrpc.isDone()) {
        //OPWAIT  System.out.println("Waiting for slow RPC to get done.");
        //OPWAIT  try {
        //    Thread.sleep(1000);
        //OPWAIT  } catch (Exception e) {}
        //OPWAIT }

    } finally {
        fireEvent("beforeStop");
        server.stop();
        if (proxy != null) {
            RPC.stopProxy(proxy);
        }
        System.out.println("Down slow rpc testing");
    }
    //Interrupt thread manually
    Thread[] t = new Thread[2];
    Thread.enumerate(t);
    t[1].interrupt();
}

From source file:io.hops.metadata.util.DistributedRTClientEvaluation.java

License:Apache License

public void start() throws YarnException, IOException, InterruptedException {

    //Assign nodes to worker threads
    //Register nodes
    ResourceTracker rt = (ResourceTracker) RpcClientFactoryPBImpl.get()
            .getClient(
                    ResourceTracker.class, 1, NetUtils
                            .getConnectAddress(
                                    new InetSocketAddress(
                                            conf.get(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS),
                                            conf.getInt(YarnConfiguration.RM_RESOURCE_TRACKER_PORT,
                                                    YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT))),
                    conf);
    for (NodeId nid : nmMap.values()) {
        registerClient(rt, nid);
    }
    //Wait for processing to complete
    //TODO: Get Active RMNodes from ndb instead of sleeping
    Thread.sleep(5000);
    //Send heartbeats

    for (NodeId nid : nmMap.values()) {
        //Send the Heartbeats

        executor.execute(new RTClientWorker(nid));
        Thread.sleep(hbPeriod / nbNM);

    }
    executor.shutdown();
    Thread.sleep(duration / 4);
    nbTreatedScheduler.set(0);
    nbTreatedRT.set(0);
    long start = System.currentTimeMillis();
    Thread.sleep(duration / 2);
    double nbHBTheoric = ((double) nbNM * duration / 2) / hbPeriod;
    System.out.printf("nb treatedRT %d, nb treatedScheduler %d, theorical nbhb: %f, duration: %d\n",
            nbTreatedRT.get(), nbTreatedScheduler.get(), nbHBTheoric, System.currentTimeMillis() - start);
    double treatedSchedulerRate = (double) nbTreatedScheduler.get() / nbHBTheoric;
    double treatedRTRate = (double) nbTreatedRT.get() / nbHBTheoric;
    LOG.info("treatedSchedulerRate: " + treatedSchedulerRate);

    File file = new File(output);
    if (!file.exists()) {
        file.createNewFile();
    }
    FileWriter fileWriter = new FileWriter(output, true);

    BufferedWriter bufferWritter = new BufferedWriter(fileWriter);
    bufferWritter.write(nbNMTotal + "\t" + nbTreatedRT.get() + "\t" + nbTreatedScheduler.get() + "\t"
            + nbHBTheoric + "\t" + treatedRTRate + "\t" + treatedSchedulerRate + "\n");
    bufferWritter.close();

    Thread.sleep(1000);
    System.exit(0);
}

From source file:me.haohui.libhdfspp.TestRpcEngine.java

License:Apache License

@BeforeClass
public static void setUp() throws IOException {
    conf = new Configuration();
    conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
    // Set RPC engine to protobuf RPC engine
    RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);

    // Create server side implementation
    PBServerImpl serverImpl = new PBServerImpl();
    BlockingService service = TestRpcServiceProtos.TestProtobufRpcProto
            .newReflectiveBlockingService(serverImpl);

    // Get RPC server for server side implementation
    server = new RPC.Builder(conf).setProtocol(TestRpcService.class).setInstance(service)
            .setBindAddress(ADDRESS).setPort(PORT).build();
    addr = NetUtils.getConnectAddress(server);
    server.start();
    executor.start();
}