Example usage for java.net InetSocketAddress toString

Introduction

On this page you can find example usages of java.net.InetSocketAddress.toString().

Prototype

@Override
public String toString() 

Document

Constructs a string representation of this InetSocketAddress. The string is built by calling toString() on the InetAddress and appending the port number after a colon; if the address is unresolved, the part before the colon contains only the hostname.
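
A minimal sketch of the resulting format (ToStringDemo is a hypothetical class name; the exact rendering varies slightly across JDK versions, and recent JDKs print unresolved addresses as hostname/&lt;unresolved&gt;:port):

import java.net.InetSocketAddress;

public class ToStringDemo {
    public static void main(String[] args) {
        // Resolved address: InetAddress.toString() ("hostname/literal-IP") plus ":" and the port
        InetSocketAddress resolved = new InetSocketAddress("localhost", 8080);
        System.out.println(resolved); // typically "localhost/127.0.0.1:8080"

        // Unresolved address: constructed without any DNS lookup
        InetSocketAddress unresolved = InetSocketAddress.createUnresolved("example.com", 80);
        System.out.println(unresolved); // "example.com:80" (newer JDKs: "example.com/<unresolved>:80")
    }
}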

Usage

From source file:ch.epfl.eagle.daemon.nodemonitor.NodeMonitor.java

public void requestTaskReservations() {
    LOG.info(Logging.functionCall());
    // 1. call sendTaskReservations to n workers
    List<InetSocketAddress> listBackends = getCleanWorkersList();
    // Get the big partition
    LOG.debug("STEALING: Initial node list size: " + listBackends.size());
    int last_nodeID = (int) (listBackends.size() * bigPartition / 100);
    listBackends = listBackends.subList(0, last_nodeID);
    LOG.debug("STEALING: Using nodes from 0 to " + last_nodeID + ". List consists of : " + listBackends);

    LOG.debug("STEALING: New list of backends " + listBackends.toString());
    Collections.shuffle(listBackends);
    InetSocketAddress chosenBackend = listBackends.get(0);

    try {
        InternalService.AsyncClient client = nodeMonitorClientPool.borrowClient(chosenBackend);
        stealingAttempts++;
        LOG.debug("STEALING: Launching sendTasksReservations on node: " + chosenBackend + " stealing attempts"
                + stealingAttempts);
        client.sendTasksReservations(new SendTaskReservationsCallback(chosenBackend, client));
        LOG.debug("STEALING: Finished launching sendTasksReservations on node: " + chosenBackend);
    } catch (Exception e) {
        LOG.error("Error enqueuing task on node " + chosenBackend.toString() + ":" + e);
    }

}
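
Note that the explicit chosenBackend.toString() in the catch block above is mostly redundant: Java string concatenation already converts a non-null reference via toString(). The one behavioral difference is null handling, as this small sketch shows:

InetSocketAddress addr = null;
String implicit = "node " + addr;            // concatenation yields "node null"
String explicit = "node " + addr.toString(); // explicit call throws NullPointerException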

From source file:com.streamsets.pipeline.lib.parser.net.netflow.TestNetflowDecoder.java

@Test
public void senderAndReceiver() throws IOException, OnRecordErrorException {
    final NetflowCommonDecoder decoder = makeNetflowDecoder();

    final byte[] bytes = getV9MessagesBytes7Flows();
    final List<BaseNetflowMessage> messages = new LinkedList<>();
    final InetSocketAddress senderAddr = InetSocketAddress.createUnresolved("hostA", 1234);
    final InetSocketAddress recipientAddr = InetSocketAddress.createUnresolved("hostB", 5678);
    decoder.decodeStandaloneBuffer(Unpooled.copiedBuffer(bytes), messages, senderAddr, recipientAddr);

    assertThat(messages, hasSize(7));
    final BaseNetflowMessage firstBaseMsg = messages.get(0);
    assertThat(firstBaseMsg, instanceOf(NetflowV9Message.class));
    final NetflowV9Message firstMsg = (NetflowV9Message) firstBaseMsg;
    assertThat(firstMsg.getSender(), notNullValue());
    assertThat(firstMsg.getRecipient(), notNullValue());
    assertThat(firstMsg.getSender().toString(), equalTo(senderAddr.toString()));
    assertThat(firstMsg.getRecipient().toString(), equalTo(recipientAddr.toString()));

    Record record = RecordCreator.create();
    firstMsg.populateRecord(record);

    assertThat(record.get("/" + NetflowV9Message.FIELD_SENDER), fieldWithValue(senderAddr.toString()));
    assertThat(record.get("/" + NetflowV9Message.FIELD_RECIPIENT), fieldWithValue(recipientAddr.toString()));
}
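
Because createUnresolved() never performs a DNS lookup, comparing addresses through their toString() form, as the assertions above do, keeps the test deterministic. InetSocketAddress also defines equals(), so a direct object comparison would work too; a minimal sketch:

InetSocketAddress a = InetSocketAddress.createUnresolved("hostA", 1234);
InetSocketAddress b = InetSocketAddress.createUnresolved("hostA", 1234);
// Same hostname and port: both the string forms and the objects compare equal
assert a.toString().equals(b.toString());
assert a.equals(b);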

From source file:org.apache.hadoop.hdfs.server.namenode.JspHelper.java

public void streamBlockInAscii(InetSocketAddress addr, long blockId, Token<BlockTokenIdentifier> accessToken,
        long genStamp, long blockSize, long offsetIntoBlock, long chunkSizeToView, JspWriter out,
        Configuration conf) throws IOException {
    if (chunkSizeToView == 0)
        return;
    Socket s = new Socket();
    s.connect(addr, HdfsConstants.READ_TIMEOUT);
    s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

    long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);

    // Use the block name for file name. 
    DFSClient.BlockReader blockReader = DFSClient.BlockReader.newBlockReader(s, addr.toString() + ":" + blockId,
            blockId, accessToken, genStamp, offsetIntoBlock, amtToRead,
            conf.getInt("io.file.buffer.size", 4096));

    byte[] buf = new byte[(int) amtToRead];
    int readOffset = 0;
    int retries = 2;
    while (amtToRead > 0) {
        int numRead;
        try {
            numRead = blockReader.readAll(buf, readOffset, (int) amtToRead);
        } catch (IOException e) {
            retries--;
            if (retries == 0)
                throw new IOException("Could not read data from datanode");
            continue;
        }
        amtToRead -= numRead;
        readOffset += numRead;
    }
    blockReader = null;
    s.close();
    out.print(HtmlQuoting.quoteHtmlChars(new String(buf)));
}

From source file:ws.argo.mcg.GatewayReceiver.java

boolean joinGroup() {
    boolean success = true;
    InetSocketAddress socketAddress = new InetSocketAddress(multicastAddress, multicastPort);
    try {
        // Setup for incoming multicast requests
        maddress = InetAddress.getByName(multicastAddress);

        if (niName != null)
            ni = NetworkInterface.getByName(niName);

        if (ni == null) {
            InetAddress localhost = InetAddress.getLocalHost();
            LOGGER.fine("Network Interface name not specified.  Using the NI for localhost "
                    + localhost.getHostAddress());
            ni = NetworkInterface.getByInetAddress(localhost);
        }

        this.outboundSocket = new MulticastSocket(multicastPort);
        // for some reason NI is still NULL. Check /etc/hosts
        if (ni == null) {
            this.outboundSocket.joinGroup(maddress);
            LOGGER.warning(
                    "Unable to determine the network interface for the localhost address.  Check /etc/hosts for wierd entry like 127.0.1.1 mapped to DNS name.");
            LOGGER.info("Unknown network interface joined group " + socketAddress.toString());
        } else {
            this.outboundSocket.joinGroup(socketAddress, ni);
            LOGGER.info(ni.getName() + " joined group " + socketAddress.toString());
        }
    } catch (IOException e) {
        StringBuffer buf = new StringBuffer();
        try {
            buf.append("(lb:" + this.ni.isLoopback() + " ");
        } catch (SocketException e2) {
            buf.append("(lb:err ");
        }
        try {
            buf.append("m:" + this.ni.supportsMulticast() + " ");
        } catch (SocketException e3) {
            buf.append("(m:err ");
        }
        try {
            buf.append("p2p:" + this.ni.isPointToPoint() + " ");
        } catch (SocketException e1) {
            buf.append("p2p:err ");
        }
        try {
            buf.append("up:" + this.ni.isUp() + " ");
        } catch (SocketException e1) {
            buf.append("up:err ");
        }
        buf.append("v:" + this.ni.isVirtual() + ") ");

        System.out.println(this.ni.getName() + " " + buf.toString() + ": could not join group "
                + socketAddress.toString() + " --> " + e.toString());

        success = false;
    }
    return success;
}

From source file:ws.argo.mcg.GatewaySender.java

boolean joinGroup() {
    boolean success = true;
    InetSocketAddress socketAddress = new InetSocketAddress(multicastAddress, multicastPort);
    try {
        // Setup for incoming multicast requests

        maddress = InetAddress.getByName(multicastAddress);

        if (niName != null)
            ni = NetworkInterface.getByName(niName);
        if (ni == null) {
            InetAddress localhost = InetAddress.getLocalHost();
            LOGGER.fine("Network Interface name not specified or incorrect.  Using the NI for localhost "
                    + localhost.getHostAddress());
            ni = NetworkInterface.getByInetAddress(localhost);
        }

        LOGGER.info("Starting GatewaySender:  Receiving mulitcast @ " + multicastAddress + ":" + multicastPort
                + " -- Sending unicast @ " + unicastAddress + ":" + unicastPort);
        this.inboundSocket = new MulticastSocket(multicastPort);
        if (ni == null) { // for some reason NI is still NULL. Not sure why this
                          // happens.
            this.inboundSocket.joinGroup(maddress);
            LOGGER.warning(
                    "Unable to determine the network interface for the localhost address. Check /etc/hosts for weird entry like 127.0.1.1 mapped to DNS name.");
            LOGGER.info("Unknown network interface joined group " + socketAddress.toString());
        } else {
            this.inboundSocket.joinGroup(socketAddress, ni);
            LOGGER.info(ni.getName() + " joined group " + socketAddress.toString());
        }

    } catch (IOException e) {
        StringBuffer buf = new StringBuffer();
        try {
            buf.append("(lb:" + this.ni.isLoopback() + " ");
        } catch (SocketException e2) {
            buf.append("(lb:err ");
        }
        try {
            buf.append("m:" + this.ni.supportsMulticast() + " ");
        } catch (SocketException e3) {
            buf.append("(m:err ");
        }
        try {
            buf.append("p2p:" + this.ni.isPointToPoint() + " ");
        } catch (SocketException e1) {
            buf.append("p2p:err ");
        }
        try {
            buf.append("up:" + this.ni.isUp() + " ");
        } catch (SocketException e1) {
            buf.append("up:err ");
        }
        buf.append("v:" + this.ni.isVirtual() + ") ");

        System.out.println(this.ni.getName() + " " + buf.toString() + ": could not join group "
                + socketAddress.toString() + " --> " + e.toString());

        success = false;
    }
    return success;
}

From source file:org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.java

private void copyBlock(DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    DFSClient.BlockReader blockReader = null;
    Block block = lblock.getBlock();

    while (s == null) {
        DatanodeInfo chosenNode;

        try {
            chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
            targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
        } catch (IOException ie) {
            if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
                throw new IOException("Could not obtain block " + lblock);
            }
            LOG.info("Could not obtain block from any node:  " + ie);
            try {
                Thread.sleep(10000);
            } catch (InterruptedException iex) {
            }
            deadNodes.clear();
            failures++;
            continue;
        }
        try {
            s = new Socket();
            s.connect(targetAddr, HdfsConstants.READ_TIMEOUT);
            s.setSoTimeout(HdfsConstants.READ_TIMEOUT);

            blockReader = DFSClient.BlockReader.newBlockReader(s,
                    targetAddr.toString() + ":" + block.getBlockId(), block.getBlockId(),
                    lblock.getBlockToken(), block.getGenerationStamp(), 0, -1,
                    conf.getInt("io.file.buffer.size", 4096));

        } catch (IOException ex) {
            // Put chosen node into dead list, continue
            LOG.info("Failed to connect to " + targetAddr + ":" + ex);
            deadNodes.add(chosenNode);
            if (s != null) {
                try {
                    s.close();
                } catch (IOException iex) {
                }
            }
            s = null;
        }
    }
    if (blockReader == null) {
        throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
        while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
            fos.write(buf, 0, cnt);
            bytesRead += cnt;
        }
        if (bytesRead != block.getNumBytes()) {
            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned "
                    + bytesRead + " bytes");
        }
    } catch (Exception e) {
        e.printStackTrace();
        success = false;
    } finally {
        try {
            s.close();
        } catch (Exception e1) {
        }
    }
    if (!success)
        throw new Exception("Could not copy block data for " + lblock.getBlock());
}

From source file:org.eclipse.smarthome.binding.lifx.internal.LifxLightDiscovery.java

private boolean sendPacket(Packet packet, InetSocketAddress address, SelectionKey selectedKey) {

    boolean result = false;

    try {
        boolean sent = false;

        while (!sent) {
            try {
                selector.selectNow();
            } catch (IOException e) {
                logger.error("An exception occurred while selecting: {}", e.getMessage());
            }

            Set<SelectionKey> selectedKeys = selector.selectedKeys();

            Iterator<SelectionKey> keyIterator = selectedKeys.iterator();

            while (keyIterator.hasNext()) {
                SelectionKey key = keyIterator.next();

                if (key.isValid() && key.isWritable() && key.equals(selectedKey)) {
                    SelectableChannel channel = key.channel();
                    try {
                        if (channel instanceof DatagramChannel) {
                            logger.trace(
                                    "Discovery : Sending packet type '{}' from '{}' to '{}' for '{}' with sequence '{}' and source '{}'",
                                    new Object[] { packet.getClass().getSimpleName(),
                                            ((InetSocketAddress) ((DatagramChannel) channel).getLocalAddress())
                                                    .toString(),
                                            address.toString(), packet.getTarget().getHex(),
                                            packet.getSequence(), Long.toString(packet.getSource(), 16) });
                            ((DatagramChannel) channel).send(packet.bytes(), address);

                            sent = true;
                            result = true;
                        } else if (channel instanceof SocketChannel) {
                            ((SocketChannel) channel).write(packet.bytes());
                        }
                    } catch (Exception e) {
                        logger.error("An exception occurred while writing data : '{}'", e.getMessage());
                    }
                }
            }
        }

    } catch (Exception e) {
        logger.error("An exception occurred while communicating with the light : '{}'", e.getMessage());
    }

    return result;
}

From source file:org.apache.hadoop.dfs.NamenodeFsck.java

private void copyBlock(DFSClient dfs, LocatedBlock lblock, OutputStream fos) throws Exception {
    int failures = 0;
    InetSocketAddress targetAddr = null;
    TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
    Socket s = null;
    DFSClient.BlockReader blockReader = null;
    Block block = lblock.getBlock();

    while (s == null) {
        DatanodeInfo chosenNode;

        try {
            chosenNode = bestNode(dfs, lblock.getLocations(), deadNodes);
            targetAddr = NetUtils.createSocketAddr(chosenNode.getName());
        } catch (IOException ie) {
            if (failures >= DFSClient.MAX_BLOCK_ACQUIRE_FAILURES) {
                throw new IOException("Could not obtain block " + lblock);
            }
            LOG.info("Could not obtain block from any node:  " + ie);
            try {
                Thread.sleep(10000);
            } catch (InterruptedException iex) {
            }
            deadNodes.clear();
            failures++;
            continue;
        }
        try {
            s = new Socket();
            s.connect(targetAddr, FSConstants.READ_TIMEOUT);
            s.setSoTimeout(FSConstants.READ_TIMEOUT);

            blockReader = DFSClient.BlockReader.newBlockReader(s,
                    targetAddr.toString() + ":" + block.getBlockId(), block.getBlockId(),
                    block.getGenerationStamp(), 0, -1, conf.getInt("io.file.buffer.size", 4096));

        } catch (IOException ex) {
            // Put chosen node into dead list, continue
            LOG.info("Failed to connect to " + targetAddr + ":" + ex);
            deadNodes.add(chosenNode);
            if (s != null) {
                try {
                    s.close();
                } catch (IOException iex) {
                }
            }
            s = null;
        }
    }
    if (blockReader == null) {
        throw new Exception("Could not open data stream for " + lblock.getBlock());
    }
    byte[] buf = new byte[1024];
    int cnt = 0;
    boolean success = true;
    long bytesRead = 0;
    try {
        while ((cnt = blockReader.read(buf, 0, buf.length)) > 0) {
            fos.write(buf, 0, cnt);
            bytesRead += cnt;
        }
        if (bytesRead != block.getNumBytes()) {
            throw new IOException("Recorded block size is " + block.getNumBytes() + ", but datanode returned "
                    + bytesRead + " bytes");
        }
    } catch (Exception e) {
        e.printStackTrace();
        success = false;
    } finally {
        try {
            s.close();
        } catch (Exception e1) {
        }
    }
    if (!success)
        throw new Exception("Could not copy block data for " + lblock.getBlock());
}

From source file:org.apache.hadoop.hbase.regionserver.RSRpcServices.java

public RSRpcServices(HRegionServer rs) throws IOException {
    regionServer = rs;

    RpcSchedulerFactory rpcSchedulerFactory;
    try {
        Class<?> rpcSchedulerFactoryClass = rs.conf.getClass(REGION_SERVER_RPC_SCHEDULER_FACTORY_CLASS,
                SimpleRpcSchedulerFactory.class);
        rpcSchedulerFactory = ((RpcSchedulerFactory) rpcSchedulerFactoryClass.newInstance());
    } catch (InstantiationException e) {
        throw new IllegalArgumentException(e);
    } catch (IllegalAccessException e) {
        throw new IllegalArgumentException(e);
    }
    // Server to handle client requests.
    String hostname = rs.conf.get("hbase.regionserver.ipc.address",
            Strings.domainNamePointerToHostName(
                    DNS.getDefaultHost(rs.conf.get("hbase.regionserver.dns.interface", "default"),
                            rs.conf.get("hbase.regionserver.dns.nameserver", "default"))));
    int port = rs.conf.getInt(HConstants.REGIONSERVER_PORT, HConstants.DEFAULT_REGIONSERVER_PORT);
    // Creation of a HSA will force a resolve.
    InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
    if (initialIsa.getAddress() == null) {
        throw new IllegalArgumentException("Failed resolve of " + initialIsa);
    }
    priority = new AnnotationReadingPriorityFunction(this);
    String name = rs.getProcessName() + "/" + initialIsa.toString();
    // Set how many times to retry talking to another server over HConnection.
    ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
    rpcServer = new RpcServer(rs, name, getServices(), initialIsa, // BindAddress is IP we got for this server.
            rs.conf, rpcSchedulerFactory.create(rs.conf, this));

    scannerLeaseTimeoutPeriod = rs.conf.getInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD,
            HConstants.DEFAULT_HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD);
    maxScannerResultSize = rs.conf.getLong(HConstants.HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY,
            HConstants.DEFAULT_HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE);

    // Set our address.
    isa = rpcServer.getListenerAddress();
    rpcServer.setErrorHandler(this);
    rs.setName(name);
}
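
As the inline comment notes, constructing an InetSocketAddress from a hostname forces a DNS resolve; on failure getAddress() returns null (equivalently, isUnresolved() returns true), which is exactly what the constructor checks before embedding initialIsa.toString() in the server name. A minimal sketch of that guard, using a deliberately unresolvable host:

InetSocketAddress isa = new InetSocketAddress("no-such-host.invalid", 16020);
if (isa.isUnresolved()) { // same condition as isa.getAddress() == null
    // toString() still works on an unresolved address; it falls back to the bare hostname
    throw new IllegalArgumentException("Failed resolve of " + isa);
}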