Example usage for javax.servlet.http HttpServletRequest getRemotePort

List of usage examples for javax.servlet.http HttpServletRequest getRemotePort

Introduction

In this page you can find the example usage for javax.servlet.http HttpServletRequest getRemotePort.

Prototype

public int getRemotePort();

Source Link

Document

Returns the Internet Protocol (IP) source port of the client or last proxy that sent the request.

Usage

From source file:com.github.ipaas.ifw.util.IPUtil.java

/**
 * Resolves the originating client port for the given request.
 * <p>
 * Prefers the {@code Ty_Remote_Port} header (presumably set by an upstream
 * proxy/load balancer — confirm against the deployment topology); falls back
 * to the socket-level remote port when the header is absent, blank, or not a
 * parseable integer.
 *
 * @param request
 *            the incoming HTTP request
 * @return the client port as an int
 */
public static int getUserPort(HttpServletRequest request) {
    String portStr = request.getHeader("Ty_Remote_Port");
    if (StringUtil.isNullOrBlank(portStr))
        return request.getRemotePort();
    try {
        return Integer.parseInt(portStr);
    } catch (NumberFormatException e) {
        // Malformed header value: ignore it and trust the socket port.
        return request.getRemotePort();
    }
}

From source file:net.jadler.stubbing.server.jetty.RequestUtils.java

/**
 * Builds a Jadler {@code Request} snapshot from the given servlet request,
 * capturing method, full URI (including query string), headers, body stream,
 * both socket endpoints and the character encoding.
 *
 * @param source the servlet request to convert
 * @return an equivalent {@code Request} instance
 * @throws IOException if the request body stream cannot be obtained
 */
public static Request convert(HttpServletRequest source) throws IOException {
    final URI uri = URI.create(source.getRequestURL() + getQueryString(source));
    final InputStream body = source.getInputStream();
    final InetSocketAddress local = new InetSocketAddress(source.getLocalAddr(), source.getLocalPort());
    final InetSocketAddress remote = new InetSocketAddress(source.getRemoteAddr(), source.getRemotePort());
    return new Request(source.getMethod(), uri, converHeaders(source), body, local, remote,
            source.getCharacterEncoding());
}

From source file:ddf.security.common.audit.SecurityLogger.java

/**
 * Appends audit context to the message being built: the subject's user name,
 * any conditional subject attributes, and either the client IP/port (inbound)
 * or the outbound endpoint address.
 *
 * @param subject        the subject performing the request
 * @param message        the CXF message, or {@code null} when no request
 *                       context exists
 * @param messageBuilder the builder receiving the audit text
 */
private static void requestIpAndPortAndUserMessage(Subject subject, Message message,
        StringBuilder messageBuilder) {
    String user = getUser(subject);
    messageBuilder.append(SUBJECT).append(user);
    appendConditionalAttributes(subject, messageBuilder);

    if (message == null) {
        messageBuilder.append(" ");
        return;
    }

    HttpServletRequest servletRequest = (HttpServletRequest) message
            .get(AbstractHTTPDestination.HTTP_REQUEST);
    if (servletRequest != null) {
        // Record who is connecting to us: client IP plus source port.
        messageBuilder.append(" Request IP: ").append(servletRequest.getRemoteAddr()).append(", Port: ")
                .append(servletRequest.getRemotePort()).append(" ");
    } else if (MessageUtils.isOutbound(message)) {
        messageBuilder.append(" Outbound endpoint: ").append(message.get(Message.ENDPOINT_ADDRESS))
                .append(" ");
    }
}

From source file:com.flexive.faces.FxJsfUtils.java

/**
 * Get the server URL like "http://www.flexive.org" without the context path
 *
 * @return server URL/* w  ww  .  j a va 2s .c om*/
 */
public static String getServerURL() {
    final FxRequest req = FxJsfUtils.getRequest();
    try {
        return req.getRequestURL().substring(0, req.getRequestURL().indexOf(req.getContextPath()));
    } catch (Exception e) {
        final HttpServletRequest r = req.getRequest();
        return r.getProtocol() + "://" + r.getRemoteHost()
                + (r.getProtocol().startsWith("http") ? "" : ":" + r.getRemotePort());
    }
}

From source file:edu.cornell.mannlib.vitro.webapp.controller.authenticate.FriendController.java

/**
 * Logs a warning recording the remote address, port, host and user of a
 * login that arrived via the "friend" mechanism.
 *
 * @param req the request that triggered the friend login
 */
private void writeWarningToTheLog(HttpServletRequest req) {
    StringBuilder entry = new StringBuilder("LOGGING IN VIA FRIEND FROM ADDR=");
    entry.append(req.getRemoteAddr());
    entry.append(", PORT=").append(req.getRemotePort());
    entry.append(", HOST=").append(req.getRemoteHost());
    entry.append(", USER=").append(req.getRemoteUser());
    log.warn(entry.toString());
}

From source file:com.streamsets.pipeline.lib.http.HttpReceiverServlet.java

/**
 * Handles an incoming HTTP POST: rejects requests during shutdown with 410
 * (Gone), validates the request, transparently decompresses the body
 * (Snappy framed or gzip, selected via the X-SDC-COMPRESSION or standard
 * Content-Encoding header), and hands the resulting stream to
 * {@code processRequest}.
 *
 * @param req  the incoming request
 * @param resp the response; receives 410 while shutting down or 500 on a
 *             processing failure
 * @throws ServletException on servlet-container errors
 * @throws IOException      on I/O failures (including an unknown compression
 *                          value slipping past validation)
 */
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (isShuttingDown()) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        if (validatePostRequest(req, resp)) {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                // SDC-specific header wins; fall back to standard Content-Encoding.
                String compression = req.getHeader(HttpConstants.X_SDC_COMPRESSION_HEADER);
                if (compression == null) {
                    compression = req.getHeader(HttpConstants.CONTENT_ENCODING_HEADER);
                }
                if (compression != null) {
                    switch (compression) {
                    case HttpConstants.SNAPPY_COMPRESSION:
                        is = new SnappyFramedInputStream(is, true);
                        break;
                    case HttpConstants.GZIP_COMPRESSION:
                        is = new GZIPInputStream(is);
                        break;
                    default:
                        // validatePostRequest should have rejected this already.
                        throw new IOException(
                                Utils.format("It shouldn't happen, unexpected compression '{}'", compression));
                    }
                }
                LOG.debug("Processing request from '{}'", requestor);
                processRequest(req, is, resp);
            } catch (Exception ex) {
                errorQueue.offer(ex);
                errorRequestMeter.mark();
                LOG.warn("Error while processing request payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                // Timer covers the full body read + processing, success or failure.
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }

        } else {
            invalidRequestMeter.mark();
        }
    }
}

From source file:com.streamsets.pipeline.stage.origin.sdcipctokafka.IpcToKafkaServlet.java

/**
 * Receives an SDC RPC payload over HTTP POST, validates the application id,
 * content type and fragmentable-JSON1 headers, splits the (optionally
 * Snappy-compressed) stream into messages, and enqueues each message to the
 * configured Kafka topic.
 *
 * @param req  incoming RPC request
 * @param resp response used to signal acceptance (200) or the specific
 *             rejection (400/403/410/415/500)
 * @throws ServletException on servlet-container errors
 * @throws IOException      on I/O failures reading the payload
 */
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (shuttingDown) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        String appId = req.getHeader(Constants.X_SDC_APPLICATION_ID_HEADER);
        String compression = req.getHeader(Constants.X_SDC_COMPRESSION_HEADER);
        String contentType = req.getContentType();
        String json1Fragmentable = req.getHeader(Constants.X_SDC_JSON1_FRAGMENTABLE_HEADER);
        if (!Constants.APPLICATION_BINARY.equals(contentType)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "Wrong content-type '{}', expected '{}'", contentType, Constants.APPLICATION_BINARY));
        } else if (!"true".equals(json1Fragmentable)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "RPC client is not using a fragmentable JSON1 encoding, client;s SDC must be upgraded"));
        } else if (!configs.appId.equals(appId)) {
            invalidRequestMeter.mark();
            LOG.warn("IPC from '{}' invalid appId '{}', rejected", requestor, appId);
            resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Invalid 'appId'");
        } else {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                boolean processRequest = true;
                if (compression != null) {
                    switch (compression) {
                    case Constants.SNAPPY_COMPRESSION:
                        is = new SnappyFramedInputStream(is, true);
                        break;
                    default:
                        invalidRequestMeter.mark();
                        LOG.warn("Invalid compression '{}' in request from '{}', returning error", compression,
                                requestor);
                        resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                                "Unsupported compression: " + compression);
                        processRequest = false;
                    }
                }
                if (processRequest) {
                    LOG.debug("Processing request from '{}'", requestor);
                    List<byte[]> messages = SdcStreamFragmenter.fragment(is, maxMessageSize, maxRpcRequestSize);
                    LOG.debug("Request from '{}' broken into '{}' messages", requestor, messages.size());
                    // kafkaTime accumulates only the time spent in producer calls,
                    // bracketed by kStart around each one.
                    long kStart = System.currentTimeMillis();
                    SdcKafkaProducer producer = getKafkaProducer();
                    long kafkaTime = System.currentTimeMillis() - kStart;
                    try {
                        for (byte[] message : messages) {
                            // we are using round robin partition strategy, partition key is ignored
                            kStart = System.currentTimeMillis();
                            producer.enqueueMessage(configs.topic, message, "");
                            kafkaTime += System.currentTimeMillis() - kStart;
                        }
                        kStart = System.currentTimeMillis();
                        producer.write();
                        kafkaTime += System.currentTimeMillis() - kStart;
                        resp.setStatus(HttpServletResponse.SC_OK);
                        requestMeter.mark();
                    } catch (StageException ex) {
                        LOG.warn("Kakfa producer error: {}", ex.toString(), ex);
                        errorQueue.offer(ex);
                        errorRequestMeter.mark();
                        LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                        resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
                    } finally {
                        kStart = System.currentTimeMillis();
                        releaseKafkaProducer(producer);
                        kafkaTime += System.currentTimeMillis() - kStart;
                    }
                    kafkaTimer.update(kafkaTime, TimeUnit.MILLISECONDS);
                    kafkaMessagesMeter.mark(messages.size());
                }
            } catch (Exception ex) {
                errorRequestMeter.mark();
                LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }
        }
    }
}

From source file:com.streamsets.pipeline.stage.origin.ipctokafka.IpcToKafkaServlet.java

/**
 * Receives an SDC RPC payload over HTTP POST, validates the application id,
 * content type and fragmentable-JSON1 headers, splits the (optionally
 * Snappy-compressed) stream into messages, and enqueues each message to the
 * Kafka topic from the stage's Kafka config bean.
 *
 * @param req  incoming RPC request
 * @param resp response used to signal acceptance (200) or the specific
 *             rejection (400/403/410/415/500)
 * @throws ServletException on servlet-container errors
 * @throws IOException      on I/O failures reading the payload
 */
@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (shuttingDown) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        String appId = req.getHeader(Constants.X_SDC_APPLICATION_ID_HEADER);
        String compression = req.getHeader(Constants.X_SDC_COMPRESSION_HEADER);
        String contentType = req.getContentType();
        String json1Fragmentable = req.getHeader(Constants.X_SDC_JSON1_FRAGMENTABLE_HEADER);
        if (!Constants.APPLICATION_BINARY.equals(contentType)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "Wrong content-type '{}', expected '{}'", contentType, Constants.APPLICATION_BINARY));
        } else if (!"true".equals(json1Fragmentable)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "RPC client is not using a fragmentable JSON1 encoding, client;s SDC must be upgraded"));
        } else if (!configs.appId.equals(appId)) {
            invalidRequestMeter.mark();
            LOG.warn("IPC from '{}' invalid appId '{}', rejected", requestor, appId);
            resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Invalid 'appId'");
        } else {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                boolean processRequest = true;
                if (compression != null) {
                    switch (compression) {
                    case Constants.SNAPPY_COMPRESSION:
                        is = new SnappyFramedInputStream(is, true);
                        break;
                    default:
                        invalidRequestMeter.mark();
                        LOG.warn("Invalid compression '{}' in request from '{}', returning error", compression,
                                requestor);
                        resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                                "Unsupported compression: " + compression);
                        processRequest = false;
                    }
                }
                if (processRequest) {
                    LOG.debug("Processing request from '{}'", requestor);
                    List<byte[]> messages = SdcStreamFragmenter.fragment(is, maxMessageSize, maxRpcRequestSize);
                    LOG.debug("Request from '{}' broken into '{}' messages", requestor, messages.size());
                    // kafkaTime accumulates only the time spent in producer calls,
                    // bracketed by kStart around each one.
                    long kStart = System.currentTimeMillis();
                    SdcKafkaProducer producer = getKafkaProducer();
                    long kafkaTime = System.currentTimeMillis() - kStart;
                    try {
                        for (byte[] message : messages) {
                            // we are using round robin partition strategy, partition key is ignored
                            kStart = System.currentTimeMillis();
                            producer.enqueueMessage(kafkaConfigBean.kafkaConfig.topic, message, "");
                            kafkaTime += System.currentTimeMillis() - kStart;
                        }
                        kStart = System.currentTimeMillis();
                        producer.write();
                        kafkaTime += System.currentTimeMillis() - kStart;
                        resp.setStatus(HttpServletResponse.SC_OK);
                        requestMeter.mark();
                    } catch (StageException ex) {
                        LOG.warn("Kakfa producer error: {}", ex.toString(), ex);
                        errorQueue.offer(ex);
                        errorRequestMeter.mark();
                        LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                        resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
                    } finally {
                        kStart = System.currentTimeMillis();
                        releaseKafkaProducer(producer);
                        kafkaTime += System.currentTimeMillis() - kStart;
                    }
                    kafkaTimer.update(kafkaTime, TimeUnit.MILLISECONDS);
                    kafkaMessagesMeter.mark(messages.size());
                }
            } catch (Exception ex) {
                errorRequestMeter.mark();
                LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }
        }
    }
}

From source file:com.haulmont.cuba.core.sys.remoting.RemotingServlet.java

/**
 * Publishes the caller's address/host/port as the thread-bound
 * {@code RemoteClientInfo} for the duration of the remoting call, then
 * delegates to the parent servlet's dispatch.
 *
 * @param request  incoming remoting request
 * @param response response to write to
 * @throws Exception propagated from the configuration check or dispatch
 */
@Override
protected void doService(HttpServletRequest request, HttpServletResponse response) throws Exception {
    checkConfiguration(request);

    final RemoteClientInfo clientInfo = new RemoteClientInfo();
    clientInfo.setAddress(request.getRemoteAddr());
    clientInfo.setHost(request.getRemoteHost());
    clientInfo.setPort(request.getRemotePort());
    RemoteClientInfo.set(clientInfo);

    try {
        super.doService(request, response);
    } finally {
        // Always detach the thread-bound info so pooled worker threads
        // don't leak a previous client's data.
        RemoteClientInfo.clear();
    }
}

From source file:eu.fusepool.p3.webid.proxy.ProxyServlet.java

/**
 * The service method from HttpServlet; performs handling of all
 * HTTP requests independent of their method. Requests and responses within
 * the method can be distinguished by belonging to the "frontend" (i.e. the
 * client connecting to the proxy) or the "backend" (the server being
 * contacted on behalf of the client).
 *
 * @param frontendRequest Request coming in from the client
 * @param frontendResponse Response being returned to the client
 * @throws ServletException on servlet-container errors
 * @throws IOException on I/O failures talking to either side, or on a
 *         malformed backend URI
 */
@Override
protected void service(final HttpServletRequest frontendRequest, final HttpServletResponse frontendResponse)
        throws ServletException, IOException {
    log(LogService.LOG_INFO,
            "Proxying request: " + frontendRequest.getRemoteAddr() + ":" + frontendRequest.getRemotePort()
                    + " (" + frontendRequest.getHeader("Host") + ") " + frontendRequest.getMethod() + " "
                    + frontendRequest.getRequestURI());

    if (targetBaseUri == null) {
        // FIXME return status page
        return;
    }

    //////////////////// Setup backend request
    // Anonymous subclass so any HTTP method (GET, POST, ...) may carry a body.
    final HttpEntityEnclosingRequestBase backendRequest = new HttpEntityEnclosingRequestBase() {
        @Override
        public String getMethod() {
            return frontendRequest.getMethod();
        }
    };
    try {
        backendRequest.setURI(new URL(targetBaseUri + frontendRequest.getRequestURI()).toURI());
    } catch (URISyntaxException ex) {
        throw new IOException(ex);
    }

    //////////////////// Copy headers to backend request
    // Content-Length is skipped; the entity set below determines the length.
    // NOTE(review): setHeader inside the values loop keeps only the LAST value
    // of a repeated frontend header — addHeader would preserve all; confirm
    // this is intended.
    final Enumeration<String> frontendHeaderNames = frontendRequest.getHeaderNames();
    while (frontendHeaderNames.hasMoreElements()) {
        final String headerName = frontendHeaderNames.nextElement();
        final Enumeration<String> headerValues = frontendRequest.getHeaders(headerName);
        while (headerValues.hasMoreElements()) {
            final String headerValue = headerValues.nextElement();
            if (!headerName.equalsIgnoreCase("Content-Length")) {
                backendRequest.setHeader(headerName, headerValue);
            }
        }
    }

    //////////////////// Copy Entity - if any
    // Body is buffered fully in memory before forwarding.
    final byte[] inEntityBytes = IOUtils.toByteArray(frontendRequest.getInputStream());
    if (inEntityBytes.length > 0) {
        backendRequest.setEntity(new ByteArrayEntity(inEntityBytes));
    }

    //////////////////// Execute request to backend
    try (CloseableHttpResponse backendResponse = httpclient.execute(backendRequest)) {
        frontendResponse.setStatus(backendResponse.getStatusLine().getStatusCode());

        // Copy back headers: setHeader on first occurrence of a name,
        // addHeader on repeats, so multi-valued backend headers survive.
        final Header[] backendHeaders = backendResponse.getAllHeaders();
        final Set<String> backendHeaderNames = new HashSet<>(backendHeaders.length);
        for (Header header : backendHeaders) {
            if (backendHeaderNames.add(header.getName())) {
                frontendResponse.setHeader(header.getName(), header.getValue());
            } else {
                frontendResponse.addHeader(header.getName(), header.getValue());
            }
        }

        final ServletOutputStream outStream = frontendResponse.getOutputStream();

        // Copy back entity
        final HttpEntity entity = backendResponse.getEntity();
        if (entity != null) {
            try (InputStream inStream = entity.getContent()) {
                IOUtils.copy(inStream, outStream);
            }
        }
        outStream.flush();
    }
}