Example usage for org.apache.http.entity ContentType create

List of usage examples for org.apache.http.entity ContentType create

Introduction

In this page you can find the example usage for org.apache.http.entity ContentType create.

Prototype

public static ContentType create(String mimeType)

Source Link

Usage

From source file:com.sangupta.jerry.http.WebRequest.java

/**
* Set the body from the string for given mime-type.
* 
* @param string/*from   w  w w  .  ja  v a 2s . c o  m*/
*            the string to set body from
* 
* @param mimeType
*            the MIME type of the string
* 
* @return this very {@link WebRequest}
*/
public WebRequest bodyString(final String string, final String mimeType) {
    ContentType contentType = ContentType.create(mimeType);
    return body(new StringEntity(string, contentType));
}

From source file:com.esri.geoevent.datastore.GeoEventDataStoreProxy.java

/**
 * Builds an HTTP PUT request for the given URI carrying the supplied body.
 *
 * @param uri          target of the PUT
 * @param putBody      raw bytes to send as the request entity
 * @param contentTypes MIME type string for the entity (e.g. "application/json")
 * @return a fully-populated {@link HttpPut}
 * @throws RuntimeException if the content type string cannot be parsed
 */
private HttpPut createPutRequest(URI uri, byte[] putBody, String contentTypes) {
    HttpPut httpPut = new HttpPut(uri);

    // ContentType.create() never returns null; on an invalid mime type it
    // throws (IllegalArgumentException / UnsupportedCharsetException), so
    // the old null check was dead code. Translate the failure into the
    // intended error message and preserve the cause.
    ContentType contentType;
    try {
        contentType = ContentType.create(contentTypes);
    } catch (IllegalArgumentException e) {
        throw new RuntimeException("Couldn't create content types for " + contentTypes, e);
    }

    ByteArrayEntity entity = new ByteArrayEntity(putBody, contentType);
    httpPut.setEntity(entity);

    return httpPut;
}

From source file:com.naryx.tagfusion.cfm.http.cfHttpConnection.java

/**
 * Attaches any files registered on the request to the underlying HTTP
 * message: as multipart parts for a POST, or as a raw stream entity for
 * a PUT (only the first file is used).
 *
 * @throws cfmRunTimeException if the PUT file cannot be found on disk
 */
private void addFiles() throws cfmRunTimeException {
    List<fileDescriptor> files = httpData.getFiles();

    if (files.isEmpty()) {
        return;
    }

    if (message instanceof HttpPost) {
        // The original also tested (isMultipart || httpData.getFiles().size() > 0),
        // which is always true here because files is non-empty — removed.
        if (multipartEntityBuilder == null)
            multipartEntityBuilder = MultipartEntityBuilder.create().setCharset(charset);

        for (fileDescriptor nextFile : files) {
            multipartEntityBuilder.addPart(nextFile.getName(), new FileBody(nextFile.getFile(),
                    ContentType.create(nextFile.getMimeType()), nextFile.getFile().getName()));
        }

    } else if (message instanceof HttpPut) {
        fileDescriptor nextFile = files.get(0); // just use the first file specified
        try {
            // The entity takes ownership of the stream; HttpClient closes it
            // once the request body has been written.
            FileInputStream fileIn = new FileInputStream(nextFile.getFile());
            InputStreamEntity entity = new InputStreamEntity(fileIn, nextFile.getFile().length(),
                    ContentType.create(nextFile.getMimeType()));
            ((HttpPut) message).setEntity(entity);
        } catch (FileNotFoundException e) {
            // NOTE(review): newRunTimeException is a project helper; the original
            // cause `e` is dropped here — TODO confirm whether it can carry a cause.
            throw newRunTimeException("Failed to locate file " + nextFile.getFile().getAbsolutePath());
        }
    }
}

From source file:com.esri.geoevent.datastore.GeoEventDataStoreProxy.java

/**
 * Builds an HTTP POST request for the given URI. For form-urlencoded
 * bodies the query string is re-encoded and, when the server has an
 * encrypted token, the decrypted token is appended as a parameter.
 *
 * @param uri          target of the POST
 * @param postBody     body to send; rewritten for form-encoded content
 * @param contentTypes MIME type string for the entity
 * @param serverInfo   holds the (optionally encrypted) token for the server
 * @return a fully-populated {@link HttpPost}
 * @throws RuntimeException if the content type cannot be parsed or the
 *                          token cannot be decrypted
 */
private HttpPost createPostRequest(URI uri, String postBody, String contentTypes, ServerInfo serverInfo) {
    HttpPost httpPost = new HttpPost(uri);

    // ContentType.create() never returns null; on an invalid mime type it
    // throws (IllegalArgumentException / UnsupportedCharsetException), so
    // the old null check was dead code. Translate the failure into the
    // intended error message and preserve the cause.
    ContentType contentType;
    try {
        contentType = ContentType.create(contentTypes);
    } catch (IllegalArgumentException e) {
        throw new RuntimeException("Couldn't create content types for " + contentTypes, e);
    }

    if (ContentType.APPLICATION_FORM_URLENCODED.getMimeType().equals(contentType.getMimeType())) {
        String tokenToUse = null;
        if (serverInfo.encryptedToken != null) {
            try {
                tokenToUse = Crypto.doDecrypt(serverInfo.encryptedToken);
            } catch (GeneralSecurityException e) {
                throw new RuntimeException(e);
            }
        }
        List<NameValuePair> params = parseQueryStringAndAddToken(postBody, tokenToUse);
        postBody = URLEncodedUtils.format(params, "UTF-8");
    }

    StringEntity entity = new StringEntity(postBody, contentType);
    httpPost.setEntity(entity);

    return httpPost;
}

From source file:nzilbb.bas.BAS.java

/**
 * Invokes the G2P service for converting orthography into phonemic transcription.
 * @param lng <a href="https://tools.ietf.org/html/rfc5646">RFC 5646</a> tag for identifying the language.
 * @param i The text to transform.
 * @param iform The format of <var>i</var> -
 * <ul>
 *  <li>"txt" indicates connected text input, which will be tokenized before the conversion.</li> 
 *  <li>"list" indicates a sequence of unconnected words, that does not need to be tokenized. Furthermore, "list" requires a different part-of-speech tagging strategy than "txt" for the extraction of the "extended" feature set (see Parameter <var>featset</var>).</li> 
 *  <li>"tg" indicates TextGrid input. Long and short format is supported. For TextGrid input additionally the name of the item containing the words to be transcribed is to be specified by the parameter "tgname". In combination with "bpf" output format "tg" input additionally requires the specification of the sample rate by the parameter "tgrate".</li>
 *  <li>"tcf" indicates, that the input format is TCF containing at least a tokenization dominated by the element "tokens".</li> 
 *  <li>Input format "bpf" indicates BAS partitur file input containing an ORT tier to be transcribed.</li>
 * </ul>
 * @param tgitem Only needed, if <var>iform</var> is "tg". Name of the TextGrid item, that contains the words to be transcribed. In case of TextGrid output, this item is the reference for the added items. 
 * @param tgrate Only needed, if <var>iform</var> is "tg" and oform is "bpf(s)". Sample rate to convert time values from TextGrid to sample values in BAS partiture file. 
 * @param outsym Ouput phoneme symbol inventory:
 *  <ul>
 *   <li>"sampa" - language-specific SAMPA variant is the default.</li> 
 *   <li>"x-sampa" - language independent X-SAMPA and IPA can be chosen.</li> 
 *   <li>"maus-sampa" - maps the output to a language-specific phoneme subset that WEBMAUS can process.</li> 
 *   <li>"ipa" - Unicode-encoded IPA.</li> 
 *   <li>"arpabet" - supported for eng-US only</li>
 * </ul>
 * @param featset - Feature set used for grapheme-phoneme conversion. 
 *  <ul>
 *   <li>"standard" comprises a letter window centered on the grapheme to be converted.</li> 
 *   <li>"extended" set additionally includes part of speech and morphological analyses.</li>
 *  </ul>
 * @param oform Output format:
 *  <ul>
 *   <li>"bpf" indicates the BAS Partitur Format (BPF) file with a KAN tier.</li> 
 *   <li>"bpfs" differs from "bpf" only in that respect, that the phonemes are separated by blanks. In case of TextGrid input, both "bpf" and "bpfs" require the additional parameters "tgrate" and "tgitem". The content of the TextGrid tier "tgitem" is stored as a word chunk segmentation in the partiture tier TRN.</li> 
 *   <li>"txt" indicates a replacement of the input words by their transcriptions; single line output without punctuation, where phonemes are separated by blanks and words by tabulators.</li> 
 *   <li>"tab" returns the grapheme phoneme conversion result in form of a table with two columns. The first column comprises the words, the second column their blank-separated transcriptions.</li> 
 *   <li>"exttab" results in a 5-column table. The columns contain from left to right: words, transcriptions, part of speech, morpheme segmentations, and morpheme class segmentations.</li> 
 *   <li>"lex" transforms the table to a lexicon, i.e. words are unique and sorted.</li> 
 *   <li>"extlex" provides the same information as "exttab" in a unique and sorted manner. For all lex and tab outputs columns are separated by ';'.</li> 
 *   <li>"exttcf" which is currently available for German and English only additionally adds part of speech (STTS tagset), morphs, and morph classes.</li>
 *   <li>With "tg" and "exttg" TextGrid output is produced.</li>
 *  </ul>
 * @param syl whether or not the output transcription is to be syllabified.
 * @param stress whether or not word stress is to be added to the output transcription.
 * @param nrm Detects and expands 22 non-standard word types.
 * @param com whether &lt;*&gt; strings should be treated as annotation markers. If true, then strings of this type are considered as annotation markers that are not processed but passed on to the output.
 * @param align "yes", "no", or "sym" decision whether or not the transcription is to be letter-aligned. Syllable boundaries and word stress are not part of the output of this 'sym' alignment.
 * @return The result of this call.
 * @throws IOException If an IO error occurs.
 * @throws ParserConfigurationException If the XML parser for parsing the response could not be configured.
 */
public BASResponse G2P(String lng, InputStream i, String iform, String tgitem, int tgrate, String outsym,
        String featset, String oform, boolean syl, boolean stress, boolean nrm, boolean com, String align)
        throws IOException, ParserConfigurationException {
    HttpPost request = new HttpPost(getG2PUrl());
    HttpEntity entity = MultipartEntityBuilder.create().addTextBody("lng", languageTagger.tag(lng))
            .addBinaryBody("i", i, ContentType.create("text/plain"), "BAS." + iform).addTextBody("iform", iform)
            .addTextBody("tgitem", tgitem).addTextBody("tgrate", "" + tgrate).addTextBody("outsym", outsym)
            .addTextBody("featset", featset).addTextBody("oform", oform).addTextBody("syl", syl ? "yes" : "no")
            .addTextBody("stress", stress ? "yes" : "no").addTextBody("nrm", nrm ? "yes" : "no")
            // BUGFIX: the "com" field was previously populated from `nrm`
            // (copy-paste error), silently ignoring the caller's `com` flag.
            .addTextBody("com", com ? "yes" : "no").addTextBody("align", align).build();
    request.setEntity(entity);
    HttpResponse httpResponse = httpclient.execute(request);
    HttpEntity result = httpResponse.getEntity();
    return new BASResponse(result.getContent());
}

From source file:com.cisco.oss.foundation.http.netlifx.apache.ApacheNetflixHttpClient.java

/**
 * Translates a Netflix Ribbon {@code HttpRequest} into an Apache
 * HttpComponents request.
 *
 * <p>When {@code autoEncodeUri} is set, a concrete verb-specific request
 * class is used (which re-encodes the URI); otherwise a
 * {@link BasicHttpRequest} is built from the raw URI string. The request
 * entity (if any) and all headers are copied over.
 *
 * @param request    the Netflix request to translate
 * @param joiner     joins multi-valued headers into a single header value
 * @param requestUri the already-resolved target URI
 * @return the equivalent Apache HTTP request
 * @throws ClientException if the verb is unsupported or an entity is
 *                         supplied for a verb that cannot carry one
 */
private org.apache.http.HttpRequest buildHttpUriRequest(com.netflix.client.http.HttpRequest request,
        Joiner joiner, URI requestUri) {

    org.apache.http.HttpRequest httpRequest;
    if (autoEncodeUri) {
        switch (request.getVerb()) {
        case GET:
            httpRequest = new HttpGet(requestUri);
            break;
        case POST:
            httpRequest = new HttpPost(requestUri);
            break;
        case PUT:
            httpRequest = new HttpPut(requestUri);
            break;
        case DELETE:
            httpRequest = new HttpDelete(requestUri);
            break;
        case HEAD:
            httpRequest = new HttpHead(requestUri);
            break;
        case OPTIONS:
            httpRequest = new HttpOptions(requestUri);
            break;
        default:
            // BUGFIX: error message previously read "You have to one of..."
            throw new ClientException("You have to use one of the REST verbs such as GET, POST etc.");
        }
    } else {
        switch (request.getVerb()) {
        case POST:
        case PUT:
        case DELETE:
        default:
            // Raw (non-encoding) path: every verb is sent as a basic request.
            httpRequest = new BasicHttpRequest(request.getVerb().verb(), requestUri.toString());
        }

    }

    Object entity = request.getEntity();
    if (entity != null) {
        if (httpRequest instanceof HttpEntityEnclosingRequest) {
            HttpEntityEnclosingRequest httpEntityEnclosingRequestBase = (HttpEntityEnclosingRequest) httpRequest;
            // NOTE(review): ContentType.create throws on a null/invalid
            // Content-Type header — TODO confirm callers always set it.
            httpEntityEnclosingRequestBase.setEntity(new ByteArrayEntity((byte[]) entity,
                    ContentType.create(request.getHttpHeaders().getFirstValue("Content-Type"))));
        } else {
            throw new ClientException(
                    "sending content for request type " + request.getVerb() + " is not supported!");
        }
    }

    // Copy all headers, collapsing multi-valued ones with the joiner.
    Map<String, Collection<String>> headers = request.getHeaders();
    for (Map.Entry<String, Collection<String>> stringCollectionEntry : headers.entrySet()) {
        String key = stringCollectionEntry.getKey();
        Collection<String> stringCollection = stringCollectionEntry.getValue();
        String value = joiner.join(stringCollection);
        httpRequest.setHeader(key, value);
    }
    return httpRequest;
}

From source file:com.myjeeva.digitalocean.impl.DigitalOceanClient.java

/**
 * Serializes the request payload (if any) to JSON and wraps it in a
 * {@link StringEntity}.
 *
 * @param request the API request whose data should be serialized
 * @return the JSON entity, or {@code null} when the request carries no data
 */
private StringEntity createRequestData(ApiRequest request) {
    if (null == request.getData()) {
        return null;
    }
    String json = serialize.toJson(request.getData());
    return new StringEntity(json, ContentType.create(JSON_CONTENT_TYPE));
}

From source file:com.pennassurancesoftware.tutum.client.TutumClient.java

/**
 * Serializes the request payload (if any) to JSON and wraps it in a
 * {@link StringEntity}.
 *
 * @param request the API request whose data should be serialized
 * @return the JSON entity, or {@code null} when the request carries no data
 */
private StringEntity createRequestData(ApiRequest request) {
    if (null == request.getData()) {
        return null;
    }
    final String json = serialize.toJson(request.getData());
    return new StringEntity(json, ContentType.create(Constants.JSON_CONTENT_TYPE));
}