Example usage for javax.servlet.http HttpServletResponse SC_UNSUPPORTED_MEDIA_TYPE

Introduction

This page collects example usages of the javax.servlet.http HttpServletResponse SC_UNSUPPORTED_MEDIA_TYPE constant, drawn from open-source projects.

Prototype

public static final int SC_UNSUPPORTED_MEDIA_TYPE = 415;

Document

Status code (415) indicating that the server is refusing to service the request because the entity of the request is in a format not supported by the requested resource for the requested method.
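
Before the full project examples under Usage, here is a minimal sketch of the typical pattern: inspect the request's Content-Type and reject unsupported formats with this status code. The class name and the accepted media type are illustrative assumptions, not taken from any of the projects below.

import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

// Sketch: a servlet that only accepts JSON request bodies (illustrative example)
public class JsonOnlyServlet extends HttpServlet {
    @Override
    protected void doPost(HttpServletRequest req, HttpServletResponse resp)
            throws ServletException, IOException {
        String contentType = req.getContentType();
        // 415 tells the client the request entity's format is not supported by this resource
        if (contentType == null || !contentType.startsWith("application/json")) {
            resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                    "Expected application/json but got: " + contentType);
            return;
        }
        resp.setStatus(HttpServletResponse.SC_OK);
    }
}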

Usage

From source file:fr.gael.dhus.api.UploadController.java

@SuppressWarnings("unchecked")
@PreAuthorize("hasRole('ROLE_UPLOAD')")
@RequestMapping(value = "/upload", method = { RequestMethod.POST })
public void upload(Principal principal, HttpServletRequest req, HttpServletResponse res) throws IOException {
    // process only multipart requests
    if (ServletFileUpload.isMultipartContent(req)) {
        User user = (User) ((UsernamePasswordAuthenticationToken) principal).getPrincipal();
        // Create a factory for disk-based file items
        FileItemFactory factory = new DiskFileItemFactory();
        // Create a new file upload handler
        ServletFileUpload upload = new ServletFileUpload(factory);

        // Parse the request
        try {
            ArrayList<Long> collectionIds = new ArrayList<>();
            FileItem product = null;

            List<FileItem> items = upload.parseRequest(req);
            for (FileItem item : items) {
                if (COLLECTIONSKEY.equals(item.getFieldName())) {
                    if (item.getString() != null && !item.getString().isEmpty()) {
                        for (String cid : item.getString().split(",")) {
                            collectionIds.add(Long.valueOf(cid));
                        }
                    }
                } else if (PRODUCTKEY.equals(item.getFieldName())) {
                    product = item;
                }
            }
            if (product == null) {
                res.sendError(HttpServletResponse.SC_BAD_REQUEST,
                        "Your request is missing a product file to upload.");
                return;
            }
            productUploadService.upload(user.getId(), product, collectionIds);
            res.setStatus(HttpServletResponse.SC_CREATED);
            res.getWriter().print("The file was created successfully.");
            res.flushBuffer();
        } catch (FileUploadException e) {
            logger.error("An error occurred while parsing request.", e);
            res.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
                    "An error occurred while parsing request : " + e.getMessage());
        } catch (UserNotExistingException e) {
            logger.error("You need to be connected to upload a product.", e);
            res.sendError(HttpServletResponse.SC_UNAUTHORIZED, "You need to be connected to upload a product.");
        } catch (UploadingException e) {
            logger.error("An error occurred while uploading the product.", e);
            res.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
                    "An error occurred while uploading the product : " + e.getMessage());
        } catch (RootNotModifiableException e) {
            logger.error("An error occurred while uploading the product.", e);
            res.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR,
                    "An error occurred while uploading the product : " + e.getMessage());
        } catch (ProductNotAddedException e) {
            logger.error("Your product can not be read by the system.", e);
            res.sendError(HttpServletResponse.SC_NOT_ACCEPTABLE, "Your product can not be read by the system.");
        }
    } else {
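        // the request body is not multipart: reject it with 415 Unsupported Media Type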
        res.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                "Request contents type is not supported by the servlet.");
    }
}

From source file:com.streamsets.pipeline.lib.http.HttpReceiverServlet.java

@VisibleForTesting
boolean validatePostRequest(HttpServletRequest req, HttpServletResponse res)
        throws ServletException, IOException {
    boolean valid = false;
    if (validateAppId(req, res)) {
        String compression = req.getHeader(HttpConstants.X_SDC_COMPRESSION_HEADER);
        if (compression == null) {
            valid = true;
        } else {
            switch (compression) {
            case HttpConstants.SNAPPY_COMPRESSION:
                valid = true;
                break;
            default:
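                // any compression scheme other than Snappy is rejected with 415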
                String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
                LOG.warn("Invalid compression '{}' in request from '{}', returning error", compression,
                        requestor);
                res.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                        "Unsupported compression: " + compression);
                break;
            }
        }
    }
    return valid && getReceiver().validate(req, res);
}

From source file:com.novartis.pcs.ontology.rest.servlet.OntologiesServlet.java

@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    String mediaType = getExpectedMediaType(request);
    String pathInfo = StringUtils.trimToNull(request.getPathInfo());
    boolean includeNonPublicXrefs = Boolean
            .parseBoolean(StringUtils.trimToNull(request.getParameter("nonpublic-xrefs")));
    if (mediaType == null) {
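        // no supported media type could be negotiated for the response: reply 415 with an empty body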
        response.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
        response.setContentLength(0);
    } else if (pathInfo != null && pathInfo.length() > 1) {
        String ontologyName = pathInfo.substring(1);
        if (mediaType.equals(MEDIA_TYPE_JSON)) {
            serialize(ontologyName, response);
        } else {
            export(ontologyName, includeNonPublicXrefs, mediaType, response);
        }
    } else {
        mediaType = getExpectedMediaType(request, Collections.singletonList(MEDIA_TYPE_JSON));
        if (mediaType.equals(MEDIA_TYPE_JSON)) {
            serializeAll(response);
        } else {
            response.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
            response.setContentLength(0);
        }
    }
}

From source file:de.betterform.agent.web.resources.ResourceServlet.java

@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestUri = req.getRequestURI();
    String resourcePath = RESOURCE_FOLDER + getResourcePath(requestUri);
    URL url = ResourceServlet.class.getResource(resourcePath);
    if (LOG.isTraceEnabled()) {
        LOG.trace("Request URI: " + requestUri);
        LOG.trace("resource fpath: " + resourcePath);
    }//from   w  ww  .ja va2s  . c  o  m

    if (url == null) {
        boolean error = true;

        if (requestUri.endsWith(".js")) {
            //try optimized version first
            if (requestUri.contains("scripts/betterform/betterform-")) {
                if (ResourceServlet.class.getResource(resourcePath) == null) {
                    resourcePath = resourcePath.replace("betterform-", "BfRequired");
                    if (ResourceServlet.class.getResource(resourcePath) != null) {
                        error = false;
                    }
                }
            }
        }

        if (error) {
            if (LOG.isWarnEnabled()) {
                LOG.warn("Resource " + resourcePath + " not found");
            }
            resp.sendError(HttpServletResponse.SC_NOT_FOUND, "Resource " + resourcePath + " not found");
            return;
        }

    }

    if (LOG.isTraceEnabled()) {
        LOG.trace("Streaming resource " + resourcePath);
    }

    InputStream inputStream = null;

    try {
        if (exploded) {
            String path = ResourceServlet.class.getResource(resourcePath).getPath();
            inputStream = new FileInputStream(new File(path));
            if (LOG.isTraceEnabled()) {
                LOG.trace("loading reources form file: " + path);
            }
        } else {
            inputStream = ResourceServlet.class.getResourceAsStream(resourcePath);
        }

        String mimeType = getResourceContentType(resourcePath);
        if (mimeType == null) {
            mimeType = getServletContext().getMimeType(resourcePath);
        }

        if (mimeType == null) {
            if (LOG.isTraceEnabled()) {
                LOG.trace("MimeType for " + resourcePath + " not found. Sending 'not found' response");
            }
            resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                    "MimeType for " + resourcePath + " not found. Sending 'not found' response");
            return;
        }

        resp.setContentType(mimeType);
        resp.setStatus(HttpServletResponse.SC_OK);
        setCaching(req, resp);
        streamResource(req, resp, mimeType, inputStream);

        if (LOG.isTraceEnabled()) {
            LOG.trace("Resource " + resourcePath + " streamed succesfully");
        }
    } catch (Exception exception) {
        LOG.error("Error in streaming resource " + resourcePath + ". Exception is " + exception.getMessage());
    } finally {
        if (inputStream != null) {
            inputStream.close();
        }

        resp.getOutputStream().flush();
        resp.getOutputStream().close();
    }
}

From source file:com.streamsets.pipeline.stage.origin.sdcipctokafka.IpcToKafkaServlet.java

@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (shuttingDown) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        String appId = req.getHeader(Constants.X_SDC_APPLICATION_ID_HEADER);
        String compression = req.getHeader(Constants.X_SDC_COMPRESSION_HEADER);
        String contentType = req.getContentType();
        String json1Fragmentable = req.getHeader(Constants.X_SDC_JSON1_FRAGMENTABLE_HEADER);
        if (!Constants.APPLICATION_BINARY.equals(contentType)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "Wrong content-type '{}', expected '{}'", contentType, Constants.APPLICATION_BINARY));
        } else if (!"true".equals(json1Fragmentable)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "RPC client is not using a fragmentable JSON1 encoding, client;s SDC must be upgraded"));
        } else if (!configs.appId.equals(appId)) {
            invalidRequestMeter.mark();
            LOG.warn("IPC from '{}' invalid appId '{}', rejected", requestor, appId);
            resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Invalid 'appId'");
        } else {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                boolean processRequest = true;
                if (compression != null) {
                    switch (compression) {
                    case Constants.SNAPPY_COMPRESSION:
                        is = new SnappyFramedInputStream(is, true);
                        break;
                    default:
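                        // only Snappy framing is supported; any other compression gets 415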
                        invalidRequestMeter.mark();
                        LOG.warn("Invalid compression '{}' in request from '{}', returning error", compression,
                                requestor);
                        resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                                "Unsupported compression: " + compression);
                        processRequest = false;
                    }
                }
                if (processRequest) {
                    LOG.debug("Processing request from '{}'", requestor);
                    List<byte[]> messages = SdcStreamFragmenter.fragment(is, maxMessageSize, maxRpcRequestSize);
                    LOG.debug("Request from '{}' broken into '{}' messages", requestor, messages.size());
                    long kStart = System.currentTimeMillis();
                    SdcKafkaProducer producer = getKafkaProducer();
                    long kafkaTime = System.currentTimeMillis() - kStart;
                    try {
                        for (byte[] message : messages) {
                            // we are using a round robin partition strategy, the partition key is ignored
                            kStart = System.currentTimeMillis();
                            producer.enqueueMessage(configs.topic, message, "");
                            kafkaTime += System.currentTimeMillis() - kStart;
                        }
                        kStart = System.currentTimeMillis();
                        producer.write();
                        kafkaTime += System.currentTimeMillis() - kStart;
                        resp.setStatus(HttpServletResponse.SC_OK);
                        requestMeter.mark();
                    } catch (StageException ex) {
                        LOG.warn("Kakfa producer error: {}", ex.toString(), ex);
                        errorQueue.offer(ex);
                        errorRequestMeter.mark();
                        LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                        resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
                    } finally {
                        kStart = System.currentTimeMillis();
                        releaseKafkaProducer(producer);
                        kafkaTime += System.currentTimeMillis() - kStart;
                    }
                    kafkaTimer.update(kafkaTime, TimeUnit.MILLISECONDS);
                    kafkaMessagesMeter.mark(messages.size());
                }
            } catch (Exception ex) {
                errorRequestMeter.mark();
                LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }
        }
    }
}

From source file:com.github.woonsan.katharsis.servlet.KatharsisServletTest.java

@Test
public void testUnacceptableRequestContentType() throws Exception {
    MockHttpServletRequest request = new MockHttpServletRequest(servletContext);
    request.setMethod("GET");
    request.setContextPath("");
    request.setServletPath("/api");
    request.setPathInfo("/tasks");
    request.setRequestURI("/api/tasks");
    request.setContentType(JsonApiMediaType.APPLICATION_JSON_API);
    request.addHeader("Accept", "application/xml");
    request.addParameter("filter", "{\"name\":\"John\"}");

    MockHttpServletResponse response = new MockHttpServletResponse();

    katharsisServlet.service(request, response);

    assertEquals(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, response.getStatus());
    String responseContent = response.getContentAsString();
    assertTrue(responseContent == null || "".equals(responseContent.trim()));
}

From source file:com.streamsets.pipeline.stage.origin.ipctokafka.IpcToKafkaServlet.java

@Override
protected void doPost(HttpServletRequest req, HttpServletResponse resp) throws ServletException, IOException {
    String requestor = req.getRemoteAddr() + ":" + req.getRemotePort();
    if (shuttingDown) {
        LOG.debug("Shutting down, discarding incoming request from '{}'", requestor);
        resp.setStatus(HttpServletResponse.SC_GONE);
    } else {
        String appId = req.getHeader(Constants.X_SDC_APPLICATION_ID_HEADER);
        String compression = req.getHeader(Constants.X_SDC_COMPRESSION_HEADER);
        String contentType = req.getContentType();
        String json1Fragmentable = req.getHeader(Constants.X_SDC_JSON1_FRAGMENTABLE_HEADER);
        if (!Constants.APPLICATION_BINARY.equals(contentType)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "Wrong content-type '{}', expected '{}'", contentType, Constants.APPLICATION_BINARY));
        } else if (!"true".equals(json1Fragmentable)) {
            invalidRequestMeter.mark();
            resp.sendError(HttpServletResponse.SC_BAD_REQUEST, Utils.format(
                    "RPC client is not using a fragmentable JSON1 encoding, client;s SDC must be upgraded"));
        } else if (!configs.appId.equals(appId)) {
            invalidRequestMeter.mark();
            LOG.warn("IPC from '{}' invalid appId '{}', rejected", requestor, appId);
            resp.sendError(HttpServletResponse.SC_FORBIDDEN, "Invalid 'appId'");
        } else {
            long start = System.currentTimeMillis();
            LOG.debug("Request accepted from '{}'", requestor);
            try (InputStream in = req.getInputStream()) {
                InputStream is = in;
                boolean processRequest = true;
                if (compression != null) {
                    switch (compression) {
                    case Constants.SNAPPY_COMPRESSION:
                        is = new SnappyFramedInputStream(is, true);
                        break;
                    default:
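                        // only Snappy framing is supported; any other compression gets 415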
                        invalidRequestMeter.mark();
                        LOG.warn("Invalid compression '{}' in request from '{}', returning error", compression,
                                requestor);
                        resp.sendError(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE,
                                "Unsupported compression: " + compression);
                        processRequest = false;
                    }
                }
                if (processRequest) {
                    LOG.debug("Processing request from '{}'", requestor);
                    List<byte[]> messages = SdcStreamFragmenter.fragment(is, maxMessageSize, maxRpcRequestSize);
                    LOG.debug("Request from '{}' broken into '{}' messages", requestor, messages.size());
                    long kStart = System.currentTimeMillis();
                    SdcKafkaProducer producer = getKafkaProducer();
                    long kafkaTime = System.currentTimeMillis() - kStart;
                    try {
                        for (byte[] message : messages) {
                            // we are using a round robin partition strategy, the partition key is ignored
                            kStart = System.currentTimeMillis();
                            producer.enqueueMessage(kafkaConfigBean.kafkaConfig.topic, message, "");
                            kafkaTime += System.currentTimeMillis() - kStart;
                        }
                        kStart = System.currentTimeMillis();
                        producer.write();
                        kafkaTime += System.currentTimeMillis() - kStart;
                        resp.setStatus(HttpServletResponse.SC_OK);
                        requestMeter.mark();
                    } catch (StageException ex) {
                        LOG.warn("Kakfa producer error: {}", ex.toString(), ex);
                        errorQueue.offer(ex);
                        errorRequestMeter.mark();
                        LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                        resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
                    } finally {
                        kStart = System.currentTimeMillis();
                        releaseKafkaProducer(producer);
                        kafkaTime += System.currentTimeMillis() - kStart;
                    }
                    kafkaTimer.update(kafkaTime, TimeUnit.MILLISECONDS);
                    kafkaMessagesMeter.mark(messages.size());
                }
            } catch (Exception ex) {
                errorRequestMeter.mark();
                LOG.warn("Error while reading payload from '{}': {}", requestor, ex.toString(), ex);
                resp.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, ex.toString());
            } finally {
                requestTimer.update(System.currentTimeMillis() - start, TimeUnit.MILLISECONDS);
            }
        }
    }
}

From source file:com.github.woonsan.katharsis.servlet.KatharsisFilterTest.java

@Test
public void testUnacceptableRequestContentType() throws Exception {
    MockFilterChain filterChain = new MockFilterChain();

    MockHttpServletRequest request = new MockHttpServletRequest(servletContext);
    request.setMethod("GET");
    request.setContextPath("");
    request.setServletPath(null);
    request.setPathInfo(null);
    request.setRequestURI("/api/tasks/");
    request.setContentType(JsonApiMediaType.APPLICATION_JSON_API);
    request.addHeader("Accept", "application/xml");

    MockHttpServletResponse response = new MockHttpServletResponse();

    katharsisFilter.doFilter(request, response, filterChain);

    assertEquals(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE, response.getStatus());
    String responseContent = response.getContentAsString();
    assertTrue(responseContent == null || "".equals(responseContent.trim()));
}

From source file:com.novartis.pcs.ontology.rest.servlet.OntologiesServlet.java

@Override
protected void doPut(HttpServletRequest request, HttpServletResponse response)
        throws ServletException, IOException {
    String mediaType = StringUtils.trimToNull(request.getContentType());
    String encoding = StringUtils.trimToNull(request.getCharacterEncoding());
    String pathInfo = StringUtils.trimToNull(request.getPathInfo());
    Curator curator = loadCurator(request);

    if (mediaType != null && mediaType.indexOf(';') > 0) {
        mediaType = mediaType.substring(0, mediaType.indexOf(';'));
    }

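    // only OBO content encoded as UTF-8 is accepted; other media types get 415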
    if (!StringUtils.equalsIgnoreCase(mediaType, MEDIA_TYPE_OBO)
            || !StringUtils.equalsIgnoreCase(encoding, "utf-8")) {
        log("Failed to import ontology: invalid media type or encoding " + mediaType + ";charset=" + encoding);
        response.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
    } else if (pathInfo == null || pathInfo.length() <= 1) {
        log("Failed to import ontology: ontology name not include in path");
        response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
    } else if (curator == null) {
        log("Failed to import ontology: curator not found in request");
        response.setStatus(HttpServletResponse.SC_FORBIDDEN);
    } else {
        try {
            String ontologyName = pathInfo.substring(1);
            importService.importOntology(ontologyName, request.getInputStream(), curator);
            response.setStatus(HttpServletResponse.SC_OK);
            response.setHeader("Access-Control-Allow-Origin", "*");
            response.setHeader("Cache-Control", "public, max-age=0");
        } catch (DuplicateEntityException e) {
            log("Failed to import ontology: duplicate term", e);
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        } catch (InvalidEntityException e) {
            log("Failed to import ontology: invalid entity", e);
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        } catch (InvalidFormatException e) {
            log("Failed to import ontology: invalid format", e);
            response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
        } catch (Exception e) {
            log("Failed to import ontology: system error", e);
            response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
        }
    }
    response.setContentLength(0);
}

From source file:com.redhat.jenkins.nodesharingfrontend.Api.java

/**
 * Request to utilize reserved computer.
 *
 * Response codes:
 * - "200 OK" is used when the node was accepted, the node is expected to be correctly added to Jenkins by the time
 *   the request completes with the code. The code is also returned when the node is already helt by this executor.
 * - "410 Gone" when there is no longer the need for such host and orchestrator can reuse it immediately. The node must not be created.
 */
@RequirePOST
public void doUtilizeNode(@Nonnull final StaplerRequest req, @Nonnull final StaplerResponse rsp)
        throws IOException {
    final Jenkins jenkins = Jenkins.getActiveInstance();
    jenkins.checkPermission(RestEndpoint.RESERVE);

    UtilizeNodeRequest request = Entity.fromInputStream(req.getInputStream(), UtilizeNodeRequest.class);
    final NodeDefinition definition = NodeDefinition.create(request.getFileName(), request.getDefinition());
    if (definition == null)
        throw new AssertionError("Unknown node definition: " + request.getFileName());

    final String name = definition.getName();

    // utilizeNode call received even though the node is already being utilized
    Node node = getCollidingNode(jenkins, name);
    if (node != null) {
        new UtilizeNodeResponse(fingerprint).toOutputStream(rsp.getOutputStream());
        rsp.setStatus(HttpServletResponse.SC_OK);
        LOGGER.warning("Skipping node addition as it already exists");
        return;
    }

    // Do not accept the node when there is no load for it
    if (!isThereAWorkloadFor(jenkins, definition)) {
        rsp.setStatus(HttpServletResponse.SC_GONE);
        LOGGER.info("Skipping node addition as there isn't a workload for it");
        return;
    }

    try {
        final SharedNode newNode = cloud.createNode(definition);
        // Prevent replacing existing node due to a race condition in repeated utilizeNode calls
        Queue.withLock(new NotReallyRoleSensitiveCallable<Void, IOException>() {
            @Override
            public Void call() throws IOException {
                Node node = getCollidingNode(jenkins, name);
                if (node == null) {
                    jenkins.addNode(newNode);
                } else {
                    LOGGER.warning("Skipping node addition due to race condition");
                }
                return null;
            }
        });

        new UtilizeNodeResponse(fingerprint).toOutputStream(rsp.getOutputStream());
        rsp.setStatus(HttpServletResponse.SC_OK);
    } catch (IllegalArgumentException e) {
        e.printStackTrace(new PrintStream(rsp.getOutputStream()));
        rsp.setStatus(HttpServletResponse.SC_UNSUPPORTED_MEDIA_TYPE);
    }
}